dnn: move module from opencv_contrib

e6f63c7a38/modules/dnn
This commit is contained in:
Alexander Alekhin 2017-06-26 13:35:51 +03:00
parent ef692350e0
commit 93729784bb
143 changed files with 135205 additions and 0 deletions


@@ -0,0 +1,84 @@
if(WINRT)
ocv_module_disable(dnn)
endif()
include(${OpenCV_SOURCE_DIR}/cmake/OpenCVFindLibProtobuf.cmake)
if(NOT Protobuf_FOUND)
ocv_module_disable(opencv_dnn)
endif()
set(the_description "Deep neural network module. It allows loading models from different frameworks and running forward passes")
ocv_add_module(dnn opencv_core opencv_imgproc WRAP python matlab)
ocv_warnings_disable(CMAKE_CXX_FLAGS -Wno-shadow -Wno-parentheses -Wmaybe-uninitialized -Wsign-promo
-Wmissing-declarations -Wmissing-prototypes
)
ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4701 /wd4100)
if(MSVC)
add_definitions( -D_CRT_SECURE_NO_WARNINGS=1 )
ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4244 /wd4267 /wd4018 /wd4355 /wd4800 /wd4251 /wd4996 /wd4146
/wd4305 /wd4127 /wd4100 /wd4512 /wd4125 /wd4389 /wd4510 /wd4610
/wd4702 /wd4456 /wd4457 /wd4065 /wd4310 /wd4661 /wd4506
)
else()
ocv_warnings_disable(CMAKE_CXX_FLAGS -Wno-deprecated -Wmissing-prototypes -Wmissing-declarations -Wshadow
-Wunused-parameter -Wunused-local-typedefs -Wsign-compare -Wsign-promo
-Wundef -Wtautological-undefined-compare -Wignored-qualifiers -Wextra
-Wunused-function -Wunused-const-variable -Wdeprecated-declarations
)
endif()
if(APPLE_FRAMEWORK)
ocv_warnings_disable(CMAKE_CXX_FLAGS -Wshorten-64-to-32)
endif()
if(ANDROID)
add_definitions(-DDISABLE_POSIX_MEMALIGN -DTH_DISABLE_HEAP_TRACKING)
endif()
#suppress warnings in autogenerated caffe.pb.* files
add_definitions(-DHAVE_PROTOBUF=1)
ocv_warnings_disable(CMAKE_CXX_FLAGS
-Wunused-parameter -Wundef -Wignored-qualifiers -Wno-enum-compare
-Wdeprecated-declarations
/wd4125 /wd4267 /wd4127 /wd4244 /wd4512 /wd4702
/wd4456 /wd4510 /wd4610 /wd4800
-wd858 -wd2196
)
if(PROTOBUF_UPDATE_FILES)
file(GLOB proto_files src/tensorflow/*.proto)
list(APPEND proto_files src/caffe/caffe.proto)
PROTOBUF_GENERATE_CPP(Protobuf_HDRS Protobuf_SRCS ${proto_files})
else()
file(GLOB fw_srcs ${CMAKE_CURRENT_SOURCE_DIR}/misc/tensorflow/*.cc)
file(GLOB fw_hdrs ${CMAKE_CURRENT_SOURCE_DIR}/misc/tensorflow/*.h)
list(APPEND fw_srcs ${CMAKE_CURRENT_SOURCE_DIR}/misc/caffe/caffe.pb.cc)
list(APPEND fw_hdrs ${CMAKE_CURRENT_SOURCE_DIR}/misc/caffe/caffe.pb.h)
list(APPEND Protobuf_SRCS ${fw_srcs})
list(APPEND Protobuf_HDRS ${fw_hdrs})
list(APPEND Protobuf_INCLUDE_DIRS ${CMAKE_CURRENT_SOURCE_DIR}/misc/caffe)
list(APPEND Protobuf_INCLUDE_DIRS ${CMAKE_CURRENT_SOURCE_DIR}/misc/tensorflow)
endif()
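# Note: pre-generated protobuf sources from misc/ are used by default; an
# illustrative way to request regeneration from the .proto files at configure
# time is:
#   cmake -D PROTOBUF_UPDATE_FILES=ON <opencv_source_dir>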
ocv_source_group("Src\\protobuf" FILES ${Protobuf_SRCS} ${Protobuf_HDRS})
ocv_module_include_directories(include ${Protobuf_INCLUDE_DIRS})
ocv_glob_module_sources(${Protobuf_SRCS} ${Protobuf_HDRS} ${CBLAS_H_PROXY_PATH})
ocv_create_module(${Protobuf_LIBRARIES} ${LAPACK_LIBRARIES})
ocv_add_samples()
ocv_add_accuracy_tests()
ocv_add_perf_tests()
# ----------------------------------------------------------------------------
# Torch7 importer of blobs and models, produced by Torch.nn module
# ----------------------------------------------------------------------------
OCV_OPTION(${the_module}_BUILD_TORCH_IMPORTER "Build Torch model importer" ON)
if(${the_module}_BUILD_TORCH_IMPORTER)
message(STATUS "Torch importer has been enabled. To run the tests you have to install Torch "
"('th' executable should be available) "
"and generate testdata using opencv_extra/testdata/dnn/generate_torch_models.py script.")
add_definitions(-DENABLE_TORCH_IMPORTER=1)
ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4702 /wd4127 /wd4267) #suppress warnings in original torch files
endif()
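# Illustrative configure-time toggle for the importer option defined above:
#   cmake -D opencv_dnn_BUILD_TORCH_IMPORTER=OFF <opencv_source_dir>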

modules/dnn/README.md

@@ -0,0 +1,2 @@
Deep Neural Network module
==========================


@@ -0,0 +1,64 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_DNN_HPP__
#define __OPENCV_DNN_HPP__
// This is an umbrella header to include into your project.
// We are free to change the header layout in the dnn subfolder, so please include
// this header for future compatibility
/** @defgroup dnn Deep Neural Network module
@{
This module contains:
- an API for new layer creation (layers are the building bricks of neural networks);
- a set of built-in, most-useful Layers;
- an API to construct and modify comprehensive neural networks from layers;
- functionality for loading serialized network models from different frameworks.
The functionality of this module is designed only for forward pass computations (i.e. network testing).
Network training is in principle not supported.
@}
*/
#include <opencv2/dnn/dnn.hpp>
#endif /* __OPENCV_DNN_HPP__ */


@@ -0,0 +1,471 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_DNN_DNN_ALL_LAYERS_HPP__
#define __OPENCV_DNN_DNN_ALL_LAYERS_HPP__
#include <opencv2/dnn.hpp>
namespace cv
{
namespace dnn
{
//! @addtogroup dnn
//! @{
/** @defgroup dnnLayerList Partial List of Implemented Layers
@{
This subsection of the dnn module contains information about built-in layers and their descriptions.
The classes listed here, in fact, provide a C++ API for creating instances of built-in layers.
In addition to this way of layer instantiation, there is a more common factory API (see @ref dnnLayerFactory), which allows creating layers dynamically (by name) and registering new ones.
You can use both APIs, but the factory API is less convenient for native C++ programming and is basically designed for use inside importers (see @ref Importer, @ref createCaffeImporter(), @ref createTorchImporter()).
Built-in layers partially reproduce the functionality of the corresponding Caffe and Torch7 layers.
In particular, the following layers and the Caffe @ref Importer were tested to reproduce <a href="http://caffe.berkeleyvision.org/tutorial/layers.html">Caffe</a> functionality:
- Convolution
- Deconvolution
- Pooling
- InnerProduct
- TanH, ReLU, Sigmoid, BNLL, Power, AbsVal
- Softmax
- Reshape, Flatten, Slice, Split
- LRN
- MVN
- Dropout (since it does nothing on the forward pass)
*/
class CV_EXPORTS BlankLayer : public Layer
{
public:
static Ptr<BlankLayer> create(const LayerParams &params);
};
//! LSTM recurrent layer
class CV_EXPORTS LSTMLayer : public Layer
{
public:
/** Creates instance of LSTM layer */
static Ptr<LSTMLayer> create(const LayerParams& params);
/** Sets trained weights for the LSTM layer.
LSTM behavior on each step is defined by the current input, the previous output, the previous cell state and the learned weights.
Let @f$x_t@f$ be the current input, @f$h_t@f$ be the current output, @f$c_t@f$ be the current state.
Then the current output and current cell state are computed as follows:
@f{eqnarray*}{
h_t &= o_t \odot tanh(c_t), \\
c_t &= f_t \odot c_{t-1} + i_t \odot g_t, \\
@f}
where @f$\odot@f$ is the per-element multiplication operation and @f$i_t, f_t, o_t, g_t@f$ are internal gates that are computed using learned weights.
Gates are computed as follows:
@f{eqnarray*}{
i_t &= sigmoid&(W_{xi} x_t + W_{hi} h_{t-1} + b_i), \\
f_t &= sigmoid&(W_{xf} x_t + W_{hf} h_{t-1} + b_f), \\
o_t &= sigmoid&(W_{xo} x_t + W_{ho} h_{t-1} + b_o), \\
g_t &= tanh &(W_{xg} x_t + W_{hg} h_{t-1} + b_g), \\
@f}
where @f$W_{x?}@f$, @f$W_{h?}@f$ and @f$b_{?}@f$ are learned weights represented as matrices:
@f$W_{x?} \in R^{N_h \times N_x}@f$, @f$W_{h?} \in R^{N_h \times N_h}@f$, @f$b_? \in R^{N_h}@f$.
For simplicity and performance purposes we use @f$ W_x = [W_{xi}; W_{xf}; W_{xo}; W_{xg}] @f$
(i.e. @f$W_x@f$ is the vertical concatenation of @f$ W_{x?} @f$), @f$ W_x \in R^{4N_h \times N_x} @f$.
The same holds for @f$ W_h = [W_{hi}; W_{hf}; W_{ho}; W_{hg}], W_h \in R^{4N_h \times N_h} @f$
and for @f$ b = [b_i; b_f; b_o; b_g]@f$, @f$b \in R^{4N_h} @f$.
@param Wh is the matrix defining how the previous output is transformed to the internal gates (i.e. @f$ W_h @f$ in the above-mentioned notation)
@param Wx is the matrix defining how the current input is transformed to the internal gates (i.e. @f$ W_x @f$ in the above-mentioned notation)
@param b is the bias vector (i.e. @f$ b @f$ in the above-mentioned notation)
*/
virtual void setWeights(const Mat &Wh, const Mat &Wx, const Mat &b) = 0;
/** @brief Specifies the shape of the output blob, which will be [[`T`], `N`] + @p outTailShape.
* @details If this parameter is empty or unset then @p outTailShape = [`Wh`.size(0)] will be used,
* where `Wh` is the parameter from setWeights().
*/
virtual void setOutShape(const MatShape &outTailShape = MatShape()) = 0;
/** @brief Specifies whether the first dimension of the input blob is interpreted as the timestamp dimension or as the sample dimension.
*
* If the flag is set to true then the shape of the input blob will be interpreted as [`T`, `N`, `[data dims]`], where `T` specifies the number of timestamps and `N` is the number of independent streams.
* In this case each forward() call will iterate through `T` timestamps and update the layer's state `T` times.
*
* If the flag is set to false then the shape of the input blob will be interpreted as [`N`, `[data dims]`].
* In this case each forward() call will make one iteration and produce one timestamp with shape [`N`, `[out dims]`].
*/
virtual void setUseTimstampsDim(bool use = true) = 0;
/** @brief If this flag is set to true then the layer will produce @f$ c_t @f$ as its second output.
* @details The shape of the second output is the same as the first output.
*/
virtual void setProduceCellOutput(bool produce = false) = 0;
/* In the common case it uses a single input with @f$x_t@f$ values to compute the output(s) @f$h_t@f$ (and @f$c_t@f$).
* @param input should contain packed values @f$x_t@f$
* @param output contains computed outputs: @f$h_t@f$ (and @f$c_t@f$ if the setProduceCellOutput() flag was set to true).
*
* If setUseTimstampsDim() is set to true then @p input[0] should have at least two dimensions with the following shape: [`T`, `N`, `[data dims]`],
* where `T` specifies the number of timestamps and `N` is the number of independent streams (i.e. @f$ x_{t_0 + t}^{stream} @f$ is stored inside @p input[0][t, stream, ...]).
*
* If setUseTimstampsDim() is set to false then @p input[0] should contain a single timestamp, and its shape should have the form [`N`, `[data dims]`] with at least one dimension.
* (i.e. @f$ x_{t}^{stream} @f$ is stored inside @p input[0][stream, ...]).
*/
int inputNameToIndex(String inputName);
int outputNameToIndex(String outputName);
};
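// A minimal usage sketch (illustrative; the sizes Nx and Nh and the weight
// contents are assumptions of this example, following the notation above):
//
//     int Nx = 128, Nh = 64; // input and hidden dimensionality
//     Mat Wx(4*Nh, Nx, CV_32F), Wh(4*Nh, Nh, CV_32F), b(4*Nh, 1, CV_32F);
//     // ... fill Wx, Wh and b with trained values ...
//     Ptr<LSTMLayer> lstm = LSTMLayer::create(LayerParams());
//     lstm->setWeights(Wh, Wx, b);
//     lstm->setUseTimstampsDim(true); // input will be treated as [T, N, data dims]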
//! Classical recurrent layer
class CV_EXPORTS RNNLayer : public Layer
{
public:
/** Creates instance of RNNLayer */
static Ptr<RNNLayer> create(const LayerParams& params);
/** Sets up the learned weights.
Recurrent-layer behavior on each step is defined by the current input @f$ x_t @f$, the previous state @f$ h_{t-1} @f$ and the learned weights as follows:
@f{eqnarray*}{
h_t &= tanh&(W_{hh} h_{t-1} + W_{xh} x_t + b_h), \\
o_t &= tanh&(W_{ho} h_t + b_o),
@f}
@param Wxh is @f$ W_{xh} @f$ matrix
@param bh is @f$ b_{h} @f$ vector
@param Whh is @f$ W_{hh} @f$ matrix
@param Who is @f$ W_{ho} @f$ matrix
@param bo is @f$ b_{o} @f$ vector
*/
virtual void setWeights(const Mat &Wxh, const Mat &bh, const Mat &Whh, const Mat &Who, const Mat &bo) = 0;
/** @brief If this flag is set to true then the layer will produce @f$ h_t @f$ as its second output.
* @details The shape of the second output is the same as the first output.
*/
virtual void setProduceHiddenOutput(bool produce = false) = 0;
/** Accepts two inputs @f$x_t@f$ and @f$h_{t-1}@f$ and computes two outputs @f$o_t@f$ and @f$h_t@f$.
@param input should contain packed input @f$x_t@f$.
@param output should contain output @f$o_t@f$ (and @f$h_t@f$ if setProduceHiddenOutput() is set to true).
@p input[0] should have shape [`T`, `N`, `data_dims`] where `T` and `N` are the number of timestamps and the number of independent samples of @f$x_t@f$, respectively.
@p output[0] will have shape [`T`, `N`, @f$N_o@f$], where @f$N_o@f$ is the number of rows in the @f$ W_{ho} @f$ matrix.
If setProduceHiddenOutput() is set to true then @p output[1] will contain a Mat with shape [`T`, `N`, @f$N_h@f$], where @f$N_h@f$ is the number of rows in the @f$ W_{hh} @f$ matrix.
*/
};
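// An analogous sketch (illustrative) for the classical RNN layer; the weight
// matrices are assumed to be prepared by the caller with the sizes above:
//
//     Ptr<RNNLayer> rnn = RNNLayer::create(LayerParams());
//     rnn->setWeights(Wxh, bh, Whh, Who, bo);
//     rnn->setProduceHiddenOutput(true); // also produce h_t as output[1]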
class CV_EXPORTS BaseConvolutionLayer : public Layer
{
public:
Size kernel, stride, pad, dilation, adjustPad;
String padMode;
};
class CV_EXPORTS ActivationLayer;
class CV_EXPORTS BatchNormLayer;
class CV_EXPORTS ConvolutionLayer : public BaseConvolutionLayer
{
public:
virtual bool setActivation(const Ptr<ActivationLayer>& layer) = 0;
virtual bool setBatchNorm(const Ptr<BatchNormLayer>& layer) = 0;
static Ptr<BaseConvolutionLayer> create(const LayerParams& params);
};
class CV_EXPORTS DeconvolutionLayer : public BaseConvolutionLayer
{
public:
static Ptr<BaseConvolutionLayer> create(const LayerParams& params);
};
class CV_EXPORTS LRNLayer : public Layer
{
public:
enum Type
{
CHANNEL_NRM,
SPATIAL_NRM
};
int type;
int size;
float alpha, beta, bias;
bool normBySize;
static Ptr<LRNLayer> create(const LayerParams& params);
};
class CV_EXPORTS PoolingLayer : public Layer
{
public:
enum Type
{
MAX,
AVE,
STOCHASTIC
};
int type;
Size kernel, stride, pad;
bool globalPooling;
bool computeMaxIdx;
String padMode;
static Ptr<PoolingLayer> create(const LayerParams& params);
};
class CV_EXPORTS SoftmaxLayer : public Layer
{
public:
bool logSoftMax;
static Ptr<SoftmaxLayer> create(const LayerParams& params);
};
class CV_EXPORTS InnerProductLayer : public Layer
{
public:
int axis;
static Ptr<InnerProductLayer> create(const LayerParams& params);
};
class CV_EXPORTS MVNLayer : public Layer
{
public:
float eps;
bool normVariance, acrossChannels;
static Ptr<MVNLayer> create(const LayerParams& params);
};
/* Reshaping */
class CV_EXPORTS ReshapeLayer : public Layer
{
public:
MatShape newShapeDesc;
Range newShapeRange;
static Ptr<ReshapeLayer> create(const LayerParams& params);
};
class CV_EXPORTS FlattenLayer : public Layer
{
public:
static Ptr<FlattenLayer> create(const LayerParams &params);
};
class CV_EXPORTS ConcatLayer : public Layer
{
public:
int axis;
static Ptr<ConcatLayer> create(const LayerParams &params);
};
class CV_EXPORTS SplitLayer : public Layer
{
public:
int outputsCount; //!< Number of copies that will be produced (is ignored when negative).
static Ptr<SplitLayer> create(const LayerParams &params);
};
class CV_EXPORTS SliceLayer : public Layer
{
public:
int axis;
std::vector<int> sliceIndices;
static Ptr<SliceLayer> create(const LayerParams &params);
};
class CV_EXPORTS PermuteLayer : public Layer
{
public:
static Ptr<PermuteLayer> create(const LayerParams& params);
};
class CV_EXPORTS PaddingLayer : public Layer
{
public:
static Ptr<PaddingLayer> create(const LayerParams& params);
};
/* Activations */
class CV_EXPORTS ActivationLayer : public Layer
{
public:
virtual void forwardSlice(const float* src, float* dst, int len,
size_t outPlaneSize, int cn0, int cn1) const = 0;
};
class CV_EXPORTS ReLULayer : public ActivationLayer
{
public:
float negativeSlope;
static Ptr<ReLULayer> create(const LayerParams &params);
};
class CV_EXPORTS ChannelsPReLULayer : public ActivationLayer
{
public:
static Ptr<ChannelsPReLULayer> create(const LayerParams& params);
};
class CV_EXPORTS TanHLayer : public ActivationLayer
{
public:
static Ptr<TanHLayer> create(const LayerParams &params);
};
class CV_EXPORTS SigmoidLayer : public ActivationLayer
{
public:
static Ptr<SigmoidLayer> create(const LayerParams &params);
};
class CV_EXPORTS BNLLLayer : public ActivationLayer
{
public:
static Ptr<BNLLLayer> create(const LayerParams &params);
};
class CV_EXPORTS AbsLayer : public ActivationLayer
{
public:
static Ptr<AbsLayer> create(const LayerParams &params);
};
class CV_EXPORTS PowerLayer : public ActivationLayer
{
public:
float power, scale, shift;
static Ptr<PowerLayer> create(const LayerParams &params);
};
/* Layers used in semantic segmentation */
class CV_EXPORTS CropLayer : public Layer
{
public:
int startAxis;
std::vector<int> offset;
static Ptr<CropLayer> create(const LayerParams &params);
};
class CV_EXPORTS EltwiseLayer : public Layer
{
public:
enum EltwiseOp
{
PROD = 0,
SUM = 1,
MAX = 2,
};
static Ptr<EltwiseLayer> create(const LayerParams &params);
};
class CV_EXPORTS BatchNormLayer : public Layer
{
public:
bool hasWeights, hasBias;
float epsilon;
virtual void getScaleShift(Mat& scale, Mat& shift) const = 0;
static Ptr<BatchNormLayer> create(const LayerParams &params);
};
class CV_EXPORTS MaxUnpoolLayer : public Layer
{
public:
Size poolKernel;
Size poolPad;
Size poolStride;
static Ptr<MaxUnpoolLayer> create(const LayerParams &params);
};
class CV_EXPORTS ScaleLayer : public Layer
{
public:
bool hasBias;
static Ptr<ScaleLayer> create(const LayerParams& params);
};
class CV_EXPORTS ShiftLayer : public Layer
{
public:
static Ptr<ShiftLayer> create(const LayerParams& params);
};
class CV_EXPORTS PriorBoxLayer : public Layer
{
public:
static Ptr<PriorBoxLayer> create(const LayerParams& params);
};
class CV_EXPORTS DetectionOutputLayer : public Layer
{
public:
static Ptr<DetectionOutputLayer> create(const LayerParams& params);
};
class CV_EXPORTS NormalizeBBoxLayer : public Layer
{
public:
static Ptr<NormalizeBBoxLayer> create(const LayerParams& params);
};
//! @}
//! @}
}
}
#endif


@@ -0,0 +1,146 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_DNN_DNN_DICT_HPP__
#define __OPENCV_DNN_DNN_DICT_HPP__
#include <opencv2/core.hpp>
#include <map>
#include <ostream>
namespace cv
{
namespace dnn
{
//! @addtogroup dnn
//! @{
/** @brief This struct stores a scalar value (or array) of one of the following types: double, cv::String or int64.
* @todo Maybe int64 is useless because the double type exactly stores at least 2^52 integers.
*/
struct DictValue
{
DictValue(const DictValue &r);
DictValue(int64 i = 0) : type(Param::INT), pi(new AutoBuffer<int64,1>) { (*pi)[0] = i; } //!< Constructs integer scalar
DictValue(int i) : type(Param::INT), pi(new AutoBuffer<int64,1>) { (*pi)[0] = i; } //!< Constructs integer scalar
DictValue(unsigned p) : type(Param::INT), pi(new AutoBuffer<int64,1>) { (*pi)[0] = p; } //!< Constructs integer scalar
DictValue(double p) : type(Param::REAL), pd(new AutoBuffer<double,1>) { (*pd)[0] = p; } //!< Constructs floating point scalar
DictValue(const String &s) : type(Param::STRING), ps(new AutoBuffer<String,1>) { (*ps)[0] = s; } //!< Constructs string scalar
DictValue(const char *s) : type(Param::STRING), ps(new AutoBuffer<String,1>) { (*ps)[0] = s; } //!< @overload
template<typename TypeIter>
static DictValue arrayInt(TypeIter begin, int size); //!< Constructs integer array
template<typename TypeIter>
static DictValue arrayReal(TypeIter begin, int size); //!< Constructs floating point array
template<typename TypeIter>
static DictValue arrayString(TypeIter begin, int size); //!< Constructs array of strings
template<typename T>
T get(int idx = -1) const; //!< Tries to convert the array element with the specified index to the requested type and returns it.
int size() const;
bool isInt() const;
bool isString() const;
bool isReal() const;
DictValue &operator=(const DictValue &r);
friend std::ostream &operator<<(std::ostream &stream, const DictValue &dictv);
~DictValue();
private:
int type;
union
{
AutoBuffer<int64, 1> *pi;
AutoBuffer<double, 1> *pd;
AutoBuffer<String, 1> *ps;
void *pv;
};
DictValue(int _type, void *_p) : type(_type), pv(_p) {}
void release();
};
/** @brief This class implements a name-value dictionary; values are instances of DictValue. */
class CV_EXPORTS Dict
{
typedef std::map<String, DictValue> _Dict;
_Dict dict;
public:
//! Checks the presence of the @p key in the dictionary.
bool has(const String &key) const;
//! If the @p key is in the dictionary then returns a pointer to its value, else returns NULL.
DictValue *ptr(const String &key);
/** @overload */
const DictValue *ptr(const String &key) const;
//! If the @p key is in the dictionary then returns its value, else an error will be generated.
const DictValue &get(const String &key) const;
/** @overload */
template <typename T>
T get(const String &key) const;
//! If the @p key is in the dictionary then returns its value, else returns @p defaultValue.
template <typename T>
T get(const String &key, const T &defaultValue) const;
//! Sets a new @p value for the @p key, or adds a new key-value pair into the dictionary.
template<typename T>
const T &set(const String &key, const T &value);
friend std::ostream &operator<<(std::ostream &stream, const Dict &dict);
};
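// A short usage sketch (illustrative; the keys are arbitrary examples):
//
//     Dict d;
//     d.set("kernel_size", 3);
//     d.set("scale", 1.5);
//     int k = d.get<int>("kernel_size"); // 3
//     double s = d.get<double>("scale"); // 1.5
//     int pad = d.get("pad", 0);         // key is absent: default is returned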
//! @}
}
}
#endif


@@ -0,0 +1,655 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_DNN_DNN_HPP__
#define __OPENCV_DNN_DNN_HPP__
#include <vector>
#include <opencv2/core.hpp>
#include <opencv2/dnn/dict.hpp>
namespace cv
{
namespace dnn //! This namespace is used for dnn module functionality.
{
//! @addtogroup dnn
//! @{
typedef std::vector<int> MatShape;
/**
* @brief Enum of computation backends supported by layers.
*/
enum Backend
{
DNN_BACKEND_DEFAULT,
DNN_BACKEND_HALIDE
};
/**
* @brief Enum of target devices for computations.
*/
enum Target
{
DNN_TARGET_CPU,
DNN_TARGET_OPENCL
};
/** @brief Initialize dnn module and built-in layers.
*
* This function is automatically called on most OpenCV builds,
* but you need to call it manually on some specific configurations (iOS, for example).
*/
CV_EXPORTS_W void initModule();
/** @brief This class provides all data needed to initialize a layer.
*
* It includes a dictionary with scalar params (which can be read using the Dict interface),
* blob params #blobs and optional meta information: the #name and #type of the layer instance.
*/
class CV_EXPORTS LayerParams : public Dict
{
public:
//TODO: Add ability to name blob params
std::vector<Mat> blobs; //!< List of learned parameters stored as blobs.
String name; //!< Name of the layer instance (optional, can be used for internal purposes).
String type; //!< Type name which was used for creating the layer by the layer factory (optional).
};
/**
* @brief Derivatives of this class encapsulate functions of certain backends.
*/
class BackendNode
{
public:
BackendNode(int backendId);
virtual ~BackendNode(); //!< Virtual destructor to enable polymorphism.
int backendId; //!< Backend identifier.
};
/**
* @brief Derivatives of this class wrap cv::Mat for different backends and targets.
*/
class BackendWrapper
{
public:
BackendWrapper(int backendId, int targetId);
/**
* @brief Wrap cv::Mat for specific backend and target.
* @param[in] targetId Target identifier.
* @param[in] m cv::Mat for wrapping.
*
* Makes a CPU->GPU data transfer if it is required for the target.
*/
BackendWrapper(int targetId, const cv::Mat& m);
/**
* @brief Makes a wrapper for a reused cv::Mat.
* @param[in] base Wrapper of cv::Mat that will be reused.
* @param[in] shape Specific shape.
*
* Initializes the wrapper from another one. It'll wrap the same host CPU
* memory and mustn't allocate memory on the device (i.e. GPU). It might
* have a different shape. Use this in case of CPU memory reuse, so that the
* associated memory on the device is reused too.
*/
BackendWrapper(const Ptr<BackendWrapper>& base, const MatShape& shape);
virtual ~BackendWrapper(); //!< Virtual destructor to enable polymorphism.
/**
* @brief Transfer data to CPU host memory.
*/
virtual void copyToHost() = 0;
int backendId; //!< Backend identifier.
int targetId; //!< Target identifier.
};
/** @brief This interface class allows building new Layers - the building blocks of networks.
*
* Each class derived from Layer must implement the allocate() method to declare its own outputs and forward() to compute outputs.
* Also, before using the new layer in networks you must register your layer by using one of the @ref dnnLayerFactory "LayerFactory" macros.
*/
class CV_EXPORTS_W Layer
{
public:
//! List of learned parameters must be stored here to allow reading them by using Net::getParam().
CV_PROP_RW std::vector<Mat> blobs;
/** @brief Computes and sets internal parameters according to inputs, outputs and blobs.
* @param[in] input vector of already allocated input blobs
* @param[out] output vector of already allocated output blobs
*
* This method is called after the network has allocated all memory for input and output blobs
* and before inference.
*/
virtual void finalize(const std::vector<Mat*> &input, std::vector<Mat> &output);
/** @brief Given the @p input blobs, computes the output @p blobs.
* @param[in] input the input blobs.
* @param[out] output allocated output blobs, which will store results of the computation.
* @param[out] internals allocated internal blobs
*/
virtual void forward(std::vector<Mat*> &input, std::vector<Mat> &output, std::vector<Mat> &internals) = 0;
/** @brief @overload */
CV_WRAP void finalize(const std::vector<Mat> &inputs, CV_OUT std::vector<Mat> &outputs);
/** @brief @overload */
CV_WRAP std::vector<Mat> finalize(const std::vector<Mat> &inputs);
/** @brief @overload */
CV_WRAP void forward(const std::vector<Mat> &inputs, CV_IN_OUT std::vector<Mat> &outputs,
CV_IN_OUT std::vector<Mat> &internals);
/** @brief Allocates layer and computes output. */
CV_WRAP void run(const std::vector<Mat> &inputs, CV_OUT std::vector<Mat> &outputs,
CV_IN_OUT std::vector<Mat> &internals);
/** @brief Returns the index of an input blob in the input array.
* @param inputName label of input blob
*
* Each layer input and output can be labeled to easily identify them using "%<layer_name%>[.output_name]" notation.
* This method maps the label of an input blob to its index in the input vector.
*/
virtual int inputNameToIndex(String inputName);
/** @brief Returns the index of an output blob in the output array.
* @see inputNameToIndex()
*/
virtual int outputNameToIndex(String outputName);
/**
* @brief Asks the layer if it supports a specific backend for doing computations.
* @param[in] backendId computation backend identifier.
* @see Backend
*/
virtual bool supportBackend(int backendId);
/**
* @brief Returns Halide backend node.
* @param[in] inputs Input Halide buffers.
* @see BackendNode, BackendWrapper
*
* Input buffers should be exactly the same ones that will be used in forward invocations.
* Although we could use Halide::ImageParam based on the input shape only,
* this helps prevent some memory management issues (if something goes wrong,
* Halide tests will fail).
*/
virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &inputs);
/**
* @brief Automatic Halide scheduling based on layer hyper-parameters.
* @param[in] node Backend node with Halide functions.
* @param[in] inputs Blobs that will be used in forward invocations.
* @param[in] outputs Blobs that will be used in forward invocations.
* @param[in] targetId Target identifier
* @see BackendNode, Target
*
* Layers don't use their own Halide::Func members because layer fusing may
* have been applied; in that case the fused function should be scheduled.
*/
virtual void applyHalideScheduler(Ptr<BackendNode>& node,
const std::vector<Mat*> &inputs,
const std::vector<Mat> &outputs,
int targetId) const;
/**
* @brief Implements layer fusing.
* @param[in] node Backend node of bottom layer.
* @see BackendNode
*
* Relevant for graph-based backends. If the layer is attached successfully,
* returns a non-empty cv::Ptr to a node of the same backend.
* Fuses only over the last function.
*/
virtual Ptr<BackendNode> tryAttach(const Ptr<BackendNode>& node);
virtual bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
std::vector<MatShape> &internals) const;
virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
const std::vector<MatShape> &outputs) const {(void)inputs; (void)outputs; return 0;}
CV_PROP String name; //!< Name of the layer instance, can be used for logging or other internal purposes.
CV_PROP String type; //!< Type name which was used for creating layer by layer factory.
Layer();
explicit Layer(const LayerParams &params); //!< Initializes only #name, #type and #blobs fields.
void setParamsFrom(const LayerParams &params); //!< Initializes only #name, #type and #blobs fields.
virtual ~Layer();
};
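// A minimal custom layer sketch (illustrative; DoubleLayer is a hypothetical
// example, not part of the module): it simply doubles every input blob.
//
//     class DoubleLayer : public Layer
//     {
//     public:
//         DoubleLayer(const LayerParams &params) : Layer(params) {}
//         void forward(std::vector<Mat*> &input, std::vector<Mat> &output,
//                      std::vector<Mat> &internals)
//         {
//             for (size_t i = 0; i < input.size(); i++)
//                 input[i]->convertTo(output[i], -1, 2.0); // dst = 2 * src
//         }
//         static Ptr<Layer> create(LayerParams &params)
//         {
//             return Ptr<Layer>(new DoubleLayer(params));
//         }
//     };
//
// The layer still has to be registered in the LayerFactory before a network
// can create it by its type name.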
/** @brief This class allows creating and manipulating comprehensive artificial neural networks.
*
* A neural network is presented as a directed acyclic graph (DAG), where vertices are Layer instances,
* and edges specify relationships between layer inputs and outputs.
*
* Each network layer has a unique integer id and a unique string name inside its network.
* LayerId can store either layer name or layer id.
*
* This class supports reference counting of its instances, i.e. copies point to the same instance.
*/
class CV_EXPORTS_W_SIMPLE Net
{
public:
CV_WRAP Net(); //!< Default constructor.
CV_WRAP ~Net(); //!< Destructor frees the net only if there are no more references to it.
/** Returns true if there are no layers in the network. */
CV_WRAP bool empty() const;
/** @brief Adds a new layer to the net.
* @param name unique name of the layer being added.
* @param type type name of the layer being added (the type must be registered in LayerRegister).
* @param params parameters which will be used to initialize the created layer.
* @returns unique identifier of the created layer, or -1 on failure.
*/
int addLayer(const String &name, const String &type, LayerParams &params);
/** @brief Adds a new layer and connects its first input to the first output of the previously added layer.
* @see addLayer()
*/
int addLayerToPrev(const String &name, const String &type, LayerParams &params);
/** @brief Converts the string name of a layer to its integer identifier.
* @returns id of the layer, or -1 if the layer wasn't found.
*/
CV_WRAP int getLayerId(const String &layer);
CV_WRAP std::vector<String> getLayerNames() const;
/** @brief Container for strings and integers. */
typedef DictValue LayerId;
/** @brief Returns a pointer to the layer with the specified id or name which the network uses. */
CV_WRAP Ptr<Layer> getLayer(LayerId layerId);
/** @brief Returns pointers to the input layers of the specified layer. */
CV_WRAP std::vector<Ptr<Layer> > getLayerInputs(LayerId layerId);
/** @brief Deletes a layer from the network (not implemented yet) */
CV_WRAP void deleteLayer(LayerId layer);
/** @brief Connects output of the first layer to input of the second layer.
* @param outPin descriptor of the first layer output.
* @param inpPin descriptor of the second layer input.
*
* Descriptors have the following template <DFN>&lt;layer_name&gt;[.input_number]</DFN>:
* - the first part of the template, <DFN>layer_name</DFN>, is the string name of the added layer.
* If this part is empty then the network input pseudo layer will be used;
* - the second, optional part of the template, <DFN>input_number</DFN>,
* is either the number of the layer input or its label.
* If this part is omitted then the first layer input will be used.
*
* @see setNetInputs(), Layer::inputNameToIndex(), Layer::outputNameToIndex()
*/
CV_WRAP void connect(String outPin, String inpPin);
/** @brief Connects #@p outNum output of the first layer to #@p inNum input of the second layer.
* @param outLayerId identifier of the first layer
* @param inpLayerId identifier of the second layer
* @param outNum number of the first layer output
* @param inpNum number of the second layer input
*/
void connect(int outLayerId, int outNum, int inpLayerId, int inpNum);
/** @brief Sets the output names of the network input pseudo layer.
*
* Each net always has its own special network input pseudo layer with id=0.
* This layer stores the user blobs only and doesn't make any computations.
* In fact, this layer provides the only way to pass user data into the network.
* As with any other layer, this layer can label its outputs, and this function provides an easy way to do this.
*/
CV_WRAP void setInputsNames(const std::vector<String> &inputBlobNames);
/** @brief Runs forward pass to compute output of layer with name @p outputName.
* @param outputName name of the layer whose output is needed
* @return blob for first output of specified layer.
* @details By default runs forward pass for the whole network.
*/
CV_WRAP Mat forward(const String& outputName = String());
/** @brief Runs forward pass to compute output of layer with name @p outputName.
* @param outputBlobs contains all output blobs for specified layer.
* @param outputName name of the layer whose output is needed
* @details If @p outputName is empty, runs forward pass for the whole network.
*/
CV_WRAP void forward(std::vector<Mat>& outputBlobs, const String& outputName = String());
/** @brief Runs forward pass to compute outputs of layers listed in @p outBlobNames.
* @param outputBlobs contains blobs for first outputs of specified layers.
* @param outBlobNames names of the layers whose outputs are needed
*/
CV_WRAP void forward(std::vector<Mat>& outputBlobs,
const std::vector<String>& outBlobNames);
/** @brief Runs forward pass to compute outputs of layers listed in @p outBlobNames.
* @param outputBlobs contains all output blobs for each layer specified in @p outBlobNames.
* @param outBlobNames names of the layers whose outputs are needed
*/
CV_WRAP void forward(std::vector<std::vector<Mat> >& outputBlobs,
const std::vector<String>& outBlobNames);
//TODO:
/** @brief Optimized forward.
* @warning Not implemented yet.
* @details Runs forward only for those layers which weren't changed after the previous forward().
*/
void forwardOpt(LayerId toLayer);
/** @overload */
void forwardOpt(const std::vector<LayerId> &toLayers);
/**
* @brief Compile Halide layers.
* @param[in] scheduler Path to YAML file with scheduling directives.
* @see setPreferableBackend
*
* Schedules layers that support the Halide backend, then compiles them for a
* specific target. For layers not represented in the scheduling file,
* or if no manual scheduling is used at all, automatic scheduling will be applied.
*/
void setHalideScheduler(const String& scheduler);
/**
* @brief Asks the network to use a specific computation backend where supported.
* @param[in] backendId backend identifier.
* @see Backend
*/
void setPreferableBackend(int backendId);
/**
* @brief Asks the network to make computations on a specific target device.
* @param[in] targetId target identifier.
* @see Target
*/
void setPreferableTarget(int targetId);
/** @brief Sets the new value for the layer output blob.
* @param name descriptor of the layer output blob being updated.
* @param blob new blob.
* @see connect(String, String) to know the format of the descriptor.
* @note If the blob being updated is not empty then @p blob must have the same shape,
* because network reshaping is not implemented yet.
*/
CV_WRAP void setInput(const Mat &blob, const String& name = "");
/** @brief Sets the new value for the learned param of the layer.
* @param layer name or id of the layer.
* @param numParam index of the layer parameter in the Layer::blobs array.
* @param blob the new value.
* @see Layer::blobs
* @note If shape of the new blob differs from the previous shape,
* then the following forward pass may fail.
*/
CV_WRAP void setParam(LayerId layer, int numParam, const Mat &blob);
/** @brief Returns parameter blob of the layer.
* @param layer name or id of the layer.
* @param numParam index of the layer parameter in the Layer::blobs array.
* @see Layer::blobs
*/
CV_WRAP Mat getParam(LayerId layer, int numParam = 0);
/** @brief Returns indexes of layers with unconnected outputs.
*/
CV_WRAP std::vector<int> getUnconnectedOutLayers() const;
/** @brief Returns input and output shapes for all layers in the loaded model;
* preliminary inference isn't necessary.
* @param netInputShapes shapes for all input blobs in net input layer.
* @param layersIds output parameter for layer IDs.
* @param inLayersShapes output parameter for input layers shapes;
* order is the same as in layersIds
* @param outLayersShapes output parameter for output layers shapes;
* order is the same as in layersIds
*/
CV_WRAP void getLayersShapes(const std::vector<MatShape>& netInputShapes,
std::vector<int>* layersIds,
std::vector<std::vector<MatShape> >* inLayersShapes,
std::vector<std::vector<MatShape> >* outLayersShapes) const;
/** @overload */
CV_WRAP void getLayersShapes(const MatShape& netInputShape,
std::vector<int>* layersIds,
std::vector<std::vector<MatShape> >* inLayersShapes,
std::vector<std::vector<MatShape> >* outLayersShapes) const;
/** @brief Returns input and output shapes for the layer with the specified
* id in the loaded model; preliminary inference isn't necessary.
* @param netInputShape shape of the input blob in the net input layer.
* @param layerId id for layer.
* @param inLayerShapes output parameter for input layers shapes;
* order is the same as in layersIds
* @param outLayerShapes output parameter for output layers shapes;
* order is the same as in layersIds
*/
CV_WRAP void getLayerShapes(const MatShape& netInputShape,
const int layerId,
std::vector<MatShape>* inLayerShapes,
std::vector<MatShape>* outLayerShapes) const;
/** @overload */
CV_WRAP void getLayerShapes(const std::vector<MatShape>& netInputShapes,
const int layerId,
std::vector<MatShape>* inLayerShapes,
std::vector<MatShape>* outLayerShapes) const;
/** @brief Computes FLOPs for the whole loaded model with the specified input shapes.
* @param netInputShapes vector of shapes for all net inputs.
* @returns computed FLOPs.
*/
CV_WRAP int64 getFLOPS(const std::vector<MatShape>& netInputShapes) const;
/** @overload */
CV_WRAP int64 getFLOPS(const MatShape& netInputShape) const;
/** @overload */
CV_WRAP int64 getFLOPS(const int layerId,
const std::vector<MatShape>& netInputShapes) const;
/** @overload */
CV_WRAP int64 getFLOPS(const int layerId,
const MatShape& netInputShape) const;
/** @brief Returns the list of types for layers used in the model.
* @param layersTypes output parameter for returning types.
*/
CV_WRAP void getLayerTypes(std::vector<String>& layersTypes) const;
/** @brief Returns the count of layers of the specified type.
* @param layerType type.
* @returns count of layers
*/
CV_WRAP int getLayersCount(const String& layerType) const;
/** @brief Computes the number of bytes required to store
* all weights and intermediate blobs for the model.
* @param netInputShapes vector of shapes for all net inputs.
* @param weights output parameter to store resulting bytes for weights.
* @param blobs output parameter to store resulting bytes for intermediate blobs.
*/
CV_WRAP void getMemoryConsumption(const std::vector<MatShape>& netInputShapes,
size_t& weights, size_t& blobs) const;
/** @overload */
CV_WRAP void getMemoryConsumption(const MatShape& netInputShape,
size_t& weights, size_t& blobs) const;
/** @overload */
CV_WRAP void getMemoryConsumption(const int layerId,
const std::vector<MatShape>& netInputShapes,
size_t& weights, size_t& blobs) const;
/** @overload */
CV_WRAP void getMemoryConsumption(const int layerId,
const MatShape& netInputShape,
size_t& weights, size_t& blobs) const;
/** @brief Computes the number of bytes required to store
* all weights and intermediate blobs for each layer.
* @param netInputShapes vector of shapes for all net inputs.
* @param layerIds output vector to save layer IDs.
* @param weights output parameter to store resulting bytes for weights.
* @param blobs output parameter to store resulting bytes for intermediate blobs.
*/
CV_WRAP void getMemoryConsumption(const std::vector<MatShape>& netInputShapes,
std::vector<int>& layerIds, std::vector<size_t>& weights,
std::vector<size_t>& blobs) const;
/** @overload */
CV_WRAP void getMemoryConsumption(const MatShape& netInputShape,
std::vector<int>& layerIds, std::vector<size_t>& weights,
std::vector<size_t>& blobs) const;
private:
struct Impl;
Ptr<Impl> impl;
};
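// An end-to-end sketch (illustrative; the file names and the blob names
// "data" and "prob" are assumptions of this example, not fixed by the API):
//
//     Net net = readNetFromCaffe("model.prototxt", "model.caffemodel");
//     Mat img = imread("image.jpg");
//     Mat inputBlob = blobFromImage(img, 1.0, Size(224, 224),
//                                   Scalar(104, 117, 123), false);
//     net.setInput(inputBlob, "data"); // "data" - input layer name of the model
//     Mat prob = net.forward("prob");  // run the pass up to the "prob" layer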
/** @brief Small interface class for loading trained serialized models of different dnn-frameworks. */
class CV_EXPORTS_W Importer
{
public:
/** @brief Adds loaded layers into the @p net and sets connections between them. */
CV_WRAP virtual void populateNet(Net net) = 0;
virtual ~Importer();
};
/** @brief Creates the importer of <a href="http://caffe.berkeleyvision.org">Caffe</a> framework network.
* @param prototxt path to the .prototxt file with text description of the network architecture.
* @param caffeModel path to the .caffemodel file with learned network.
* @returns Pointer to the created importer, NULL in failure cases.
*/
CV_EXPORTS_W Ptr<Importer> createCaffeImporter(const String &prototxt, const String &caffeModel = String());
/** @brief Reads a network model stored in Caffe model files.
* @details This is a shortcut consisting of createCaffeImporter and Importer::populateNet calls.
*/
CV_EXPORTS_W Net readNetFromCaffe(const String &prototxt, const String &caffeModel = String());
/** @brief Reads a network model stored in Tensorflow model file.
* @details This is a shortcut consisting of createTensorflowImporter and Importer::populateNet calls.
*/
CV_EXPORTS_W Net readNetFromTensorflow(const String &model);
/** @brief Reads a network model stored in Torch model file.
* @details This is a shortcut consisting of createTorchImporter and Importer::populateNet calls.
*/
CV_EXPORTS_W Net readNetFromTorch(const String &model, bool isBinary = true);
/** @brief Creates the importer of <a href="http://www.tensorflow.org">TensorFlow</a> framework network.
* @param model path to the .pb file with binary protobuf description of the network architecture.
* @returns Pointer to the created importer, NULL in failure cases.
*/
CV_EXPORTS Ptr<Importer> createTensorflowImporter(const String &model);
/** @brief Creates the importer of <a href="http://torch.ch">Torch7</a> framework network.
* @param filename path to the file, dumped from Torch by using torch.save() function.
* @param isBinary specifies whether the network was serialized in ASCII or binary mode.
* @returns Pointer to the created importer, NULL in failure cases.
*
* @warning The Torch7 importer is experimental for now; you need to explicitly set the CMake `opencv_dnn_BUILD_TORCH_IMPORTER` flag to compile it.
*
* @note ASCII mode of the Torch serializer is preferable, because binary mode extensively uses the `long` type of the C language,
* which has varying bit-length on different systems.
*
* The file being loaded must contain a serialized <a href="https://github.com/torch/nn/blob/master/doc/module.md">nn.Module</a> object
* with the network being imported. Try to eliminate custom objects from the serialized data to avoid import errors.
*
* List of supported layers (i.e. object instances derived from Torch nn.Module class):
* - nn.Sequential
* - nn.Parallel
* - nn.Concat
* - nn.Linear
* - nn.SpatialConvolution
* - nn.SpatialMaxPooling, nn.SpatialAveragePooling
* - nn.ReLU, nn.TanH, nn.Sigmoid
* - nn.Reshape
* - nn.SoftMax, nn.LogSoftMax
*
* Also some equivalents of these classes from cunn, cudnn, and fbcunn may be successfully imported.
*/
CV_EXPORTS_W Ptr<Importer> createTorchImporter(const String &filename, bool isBinary = true);
/** @brief Loads a blob which was serialized as a torch.Tensor object of the Torch7 framework.
* @warning This function has the same limitations as createTorchImporter().
*/
CV_EXPORTS_W Mat readTorchBlob(const String &filename, bool isBinary = true);
/** @brief Creates a 4-dimensional blob from an image. Optionally resizes and crops @p image from the center,
* subtracts @p mean values, scales values by @p scalefactor, and swaps the Blue and Red channels.
* @param image input image (with 1 or 3 channels).
* @param size spatial size for output image
* @param mean scalar with mean values which are subtracted from channels. Values are intended
* to be in (mean-R, mean-G, mean-B) order if @p image has BGR ordering and @p swapRB is true.
* @param scalefactor multiplier for @p image values.
* @param swapRB flag which indicates whether the first and last channels
* of a 3-channel image should be swapped.
* @details The input image is resized so that one side after resizing equals the corresponding
* dimension in @p size and the other one is equal or larger. Then, a crop from the center is performed.
* @returns 4-dimensional Mat with NCHW dimension order.
*/
CV_EXPORTS_W Mat blobFromImage(const Mat& image, double scalefactor=1.0, const Size& size = Size(),
const Scalar& mean = Scalar(), bool swapRB=true);
/** @brief Creates a 4-dimensional blob from a series of images. Optionally resizes and
* crops @p images from the center, subtracts @p mean values, scales values by @p scalefactor,
* and swaps the Blue and Red channels.
* @param images input images (all with 1 or 3 channels).
* @param size spatial size for output image
* @param mean scalar with mean values which are subtracted from channels. Values are intended
* to be in (mean-R, mean-G, mean-B) order if @p image has BGR ordering and @p swapRB is true.
* @param scalefactor multiplier for @p images values.
* @param swapRB flag which indicates whether the first and last channels
* of a 3-channel image should be swapped.
* @details Each input image is resized so that one side after resizing equals the corresponding
* dimension in @p size and the other one is equal or larger. Then, a crop from the center is performed.
* @returns 4-dimensional Mat with NCHW dimension order.
*/
CV_EXPORTS_W Mat blobFromImages(const std::vector<Mat>& images, double scalefactor=1.0,
Size size = Size(), const Scalar& mean = Scalar(), bool swapRB=true);
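// An illustrative sketch for batching several same-sized images into one blob:
//
//     std::vector<Mat> imgs;
//     imgs.push_back(img1);
//     imgs.push_back(img2);
//     Mat batch = blobFromImages(imgs, 1.0/255, Size(224, 224));
//     // for 3-channel inputs `batch` has NCHW shape [2, 3, 224, 224]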
//! @}
}
}
#include <opencv2/dnn/layer.hpp>
#include <opencv2/dnn/dnn.inl.hpp>
#endif /* __OPENCV_DNN_DNN_HPP__ */


@@ -0,0 +1,357 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_DNN_DNN_INL_HPP__
#define __OPENCV_DNN_DNN_INL_HPP__
#include <opencv2/dnn.hpp>
namespace cv
{
namespace dnn
{
template<typename TypeIter>
DictValue DictValue::arrayInt(TypeIter begin, int size)
{
DictValue res(Param::INT, new AutoBuffer<int64, 1>(size));
for (int j = 0; j < size; begin++, j++)
(*res.pi)[j] = *begin;
return res;
}
template<typename TypeIter>
DictValue DictValue::arrayReal(TypeIter begin, int size)
{
DictValue res(Param::REAL, new AutoBuffer<double, 1>(size));
for (int j = 0; j < size; begin++, j++)
(*res.pd)[j] = *begin;
return res;
}
template<typename TypeIter>
DictValue DictValue::arrayString(TypeIter begin, int size)
{
DictValue res(Param::STRING, new AutoBuffer<String, 1>(size));
for (int j = 0; j < size; begin++, j++)
(*res.ps)[j] = *begin;
return res;
}
template<>
inline DictValue DictValue::get<DictValue>(int idx) const
{
CV_Assert(idx == -1);
return *this;
}
template<>
inline int64 DictValue::get<int64>(int idx) const
{
CV_Assert((idx == -1 && size() == 1) || (idx >= 0 && idx < size()));
idx = (idx == -1) ? 0 : idx;
if (type == Param::INT)
{
return (*pi)[idx];
}
else if (type == Param::REAL)
{
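// a real value is accepted only if it is exactly integral (no fractional part)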
double doubleValue = (*pd)[idx];
double fracpart, intpart;
fracpart = std::modf(doubleValue, &intpart);
CV_Assert(fracpart == 0.0);
return (int64)doubleValue;
}
else
{
CV_Assert(isInt() || isReal());
return 0;
}
}
template<>
inline int DictValue::get<int>(int idx) const
{
return (int)get<int64>(idx);
}
template<>
inline unsigned DictValue::get<unsigned>(int idx) const
{
return (unsigned)get<int64>(idx);
}
template<>
inline bool DictValue::get<bool>(int idx) const
{
return (get<int64>(idx) != 0);
}
template<>
inline double DictValue::get<double>(int idx) const
{
CV_Assert((idx == -1 && size() == 1) || (idx >= 0 && idx < size()));
idx = (idx == -1) ? 0 : idx;
if (type == Param::REAL)
{
return (*pd)[idx];
}
else if (type == Param::INT)
{
return (double)(*pi)[idx];
}
else
{
CV_Assert(isReal() || isInt());
return 0;
}
}
template<>
inline float DictValue::get<float>(int idx) const
{
return (float)get<double>(idx);
}
template<>
inline String DictValue::get<String>(int idx) const
{
CV_Assert(isString());
CV_Assert((idx == -1 && ps->size() == 1) || (idx >= 0 && idx < (int)ps->size()));
return (*ps)[(idx == -1) ? 0 : idx];
}
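// Retrieval sketch (illustrative): get<T>(idx) reads element idx, or the single
// stored element when idx == -1, coercing between numeric types; reading an
// integer out of a REAL value asserts that it has no fractional part:
//
//     DictValue v = DictValue::arrayInt(&ksize[0], 2);  // ksize as in the sketch above
//     int k0 = v.get<int>(0);                           // 3
//     double k1 = v.get<double>(1);                     // 3.0 (INT -> double)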
inline void DictValue::release()
{
switch (type)
{
case Param::INT:
delete pi;
break;
case Param::STRING:
delete ps;
break;
case Param::REAL:
delete pd;
break;
}
}
inline DictValue::~DictValue()
{
release();
}
inline DictValue & DictValue::operator=(const DictValue &r)
{
if (&r == this)
return *this;
if (r.type == Param::INT)
{
AutoBuffer<int64, 1> *tmp = new AutoBuffer<int64, 1>(*r.pi);
release();
pi = tmp;
}
else if (r.type == Param::STRING)
{
AutoBuffer<String, 1> *tmp = new AutoBuffer<String, 1>(*r.ps);
release();
ps = tmp;
}
else if (r.type == Param::REAL)
{
AutoBuffer<double, 1> *tmp = new AutoBuffer<double, 1>(*r.pd);
release();
pd = tmp;
}
type = r.type;
return *this;
}
inline DictValue::DictValue(const DictValue &r)
{
type = r.type;
if (r.type == Param::INT)
pi = new AutoBuffer<int64, 1>(*r.pi);
else if (r.type == Param::STRING)
ps = new AutoBuffer<String, 1>(*r.ps);
else if (r.type == Param::REAL)
pd = new AutoBuffer<double, 1>(*r.pd);
}
inline bool DictValue::isString() const
{
return (type == Param::STRING);
}
inline bool DictValue::isInt() const
{
return (type == Param::INT);
}
inline bool DictValue::isReal() const
{
return (type == Param::REAL || type == Param::INT);
}
inline int DictValue::size() const
{
switch (type)
{
case Param::INT:
return (int)pi->size();
case Param::STRING:
return (int)ps->size();
case Param::REAL:
return (int)pd->size();
default:
CV_Error(Error::StsInternal, "Unknown type of DictValue");
return -1;
}
}
inline std::ostream &operator<<(std::ostream &stream, const DictValue &dictv)
{
int i;
if (dictv.isInt())
{
for (i = 0; i < dictv.size() - 1; i++)
stream << dictv.get<int64>(i) << ", ";
stream << dictv.get<int64>(i);
}
else if (dictv.isReal())
{
for (i = 0; i < dictv.size() - 1; i++)
stream << dictv.get<double>(i) << ", ";
stream << dictv.get<double>(i);
}
else if (dictv.isString())
{
for (i = 0; i < dictv.size() - 1; i++)
stream << "\"" << dictv.get<String>(i) << "\", ";
stream << "\"" << dictv.get<String>(i) << "\"";
}
return stream;
}
/////////////////////////////////////////////////////////////////
inline bool Dict::has(const String &key) const
{
return dict.count(key) != 0;
}
inline DictValue *Dict::ptr(const String &key)
{
_Dict::iterator i = dict.find(key);
return (i == dict.end()) ? NULL : &i->second;
}
inline const DictValue *Dict::ptr(const String &key) const
{
_Dict::const_iterator i = dict.find(key);
return (i == dict.end()) ? NULL : &i->second;
}
inline const DictValue &Dict::get(const String &key) const
{
_Dict::const_iterator i = dict.find(key);
if (i == dict.end())
CV_Error(Error::StsObjectNotFound, "Required argument \"" + key + "\" not found in the dictionary");
return i->second;
}
template <typename T>
inline T Dict::get(const String &key) const
{
return this->get(key).get<T>();
}
template <typename T>
inline T Dict::get(const String &key, const T &defaultValue) const
{
_Dict::const_iterator i = dict.find(key);
if (i != dict.end())
return i->second.get<T>();
else
return defaultValue;
}
template<typename T>
inline const T &Dict::set(const String &key, const T &value)
{
_Dict::iterator i = dict.find(key);
if (i != dict.end())
i->second = DictValue(value);
else
dict.insert(std::make_pair(key, DictValue(value)));
return value;
}
inline std::ostream &operator<<(std::ostream &stream, const Dict &dict)
{
Dict::_Dict::const_iterator it;
for (it = dict.dict.begin(); it != dict.dict.end(); it++)
stream << it->first << " : " << it->second << "\n";
return stream;
}
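// Usage sketch (illustrative): Dict is a string-keyed map of DictValue with
// typed access and optional defaults:
//
//     Dict params;
//     params.set("kernel_size", 3);
//     int k = params.get<int>("kernel_size");  // 3
//     int pad = params.get<int>("pad", 0);     // 0, key is absent
//     bool hasStride = params.has("stride");   // false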
}
}
#endif


@ -0,0 +1,148 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_DNN_LAYER_HPP__
#define __OPENCV_DNN_LAYER_HPP__
#include <opencv2/dnn.hpp>
namespace cv
{
namespace dnn
{
//! @addtogroup dnn
//! @{
//!
//! @defgroup dnnLayerFactory Utilities for New Layers Registration
//! @{
/** @brief %Layer factory that allows creating instances of registered layers. */
class CV_EXPORTS LayerFactory
{
public:
//! Each Layer class must provide this function to the factory
typedef Ptr<Layer>(*Constuctor)(LayerParams &params);
//! Registers the layer class with type name @p type and the specified @p constructor.
static void registerLayer(const String &type, Constuctor constructor);
//! Unregisters the registered layer with the specified type name.
static void unregisterLayer(const String &type);
/** @brief Creates an instance of a registered layer.
* @param type type name of the layer to create.
* @param params parameters which will be used for layer initialization.
*/
static Ptr<Layer> createLayerInstance(const String &type, LayerParams& params);
private:
LayerFactory();
struct Impl;
static Ptr<Impl> impl();
};
/** @brief Registers a layer constructor at runtime.
* @param type string containing the type name of the layer.
* @param constuctorFunc pointer to a function of type LayerFactory::Constuctor which creates the layer.
* @details This macro must be placed inside a function body.
*/
#define REG_RUNTIME_LAYER_FUNC(type, constuctorFunc) \
cv::dnn::LayerFactory::registerLayer(#type, constuctorFunc);
/** @brief Registers a layer class at runtime.
* @param type string containing the type name of the layer.
* @param class C++ class derived from Layer.
* @details This macro must be placed inside a function body.
*/
#define REG_RUNTIME_LAYER_CLASS(type, class) \
cv::dnn::LayerFactory::registerLayer(#type, _layerDynamicRegisterer<class>);
/** @brief Registers a layer constructor at module load time.
* @param type string containing the type name of the layer.
* @param constuctorFunc pointer to a function of type LayerFactory::Constuctor which creates the layer.
* @details This macro must be placed outside of any function body.
*/
#define REG_STATIC_LAYER_FUNC(type, constuctorFunc) \
static cv::dnn::_LayerStaticRegisterer __LayerStaticRegisterer_##type(#type, constuctorFunc);
/** @brief Registers a layer class at module load time.
* @param type string containing the type name of the layer.
* @param class C++ class derived from Layer.
* @details This macro must be placed outside of any function body.
*/
#define REG_STATIC_LAYER_CLASS(type, class) \
Ptr<Layer> __LayerStaticRegisterer_func_##type(LayerParams &params) \
{ return Ptr<Layer>(new class(params)); } \
static _LayerStaticRegisterer __LayerStaticRegisterer_##type(#type, __LayerStaticRegisterer_func_##type);
//! @}
//! @}
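// Usage sketch (illustrative; MyReluLayer is a hypothetical class derived from
// Layer that provides a static create(LayerParams&) method):
//
//     void initCustomLayers()
//     {
//         REG_RUNTIME_LAYER_CLASS(MyRelu, MyReluLayer)
//     }
//
// Afterwards LayerFactory::createLayerInstance("MyRelu", params) constructs the
// layer; the REG_STATIC_* variants do the same when placed at file scope.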
template<typename LayerClass>
Ptr<Layer> _layerDynamicRegisterer(LayerParams &params)
{
return Ptr<Layer>(LayerClass::create(params));
}
//allows automatic registration of a created layer at module load time
class _LayerStaticRegisterer
{
String type;
public:
_LayerStaticRegisterer(const String &layerType, LayerFactory::Constuctor layerConstuctor)
{
this->type = layerType;
LayerFactory::registerLayer(layerType, layerConstuctor);
}
~_LayerStaticRegisterer()
{
LayerFactory::unregisterLayer(type);
}
};
}
}
#endif


@ -0,0 +1,195 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_DNN_DNN_SHAPE_UTILS_HPP__
#define __OPENCV_DNN_DNN_SHAPE_UTILS_HPP__
#include <opencv2/core.hpp>
#include <opencv2/core/types_c.h>
#include <ostream>
#include <algorithm>
#include <cstdio>
namespace cv {
namespace dnn {
//Useful shortcut
inline std::ostream &operator<< (std::ostream &s, const cv::Range &r)
{
return s << "[" << r.start << ", " << r.end << ")";
}
//Slicing
struct _Range : public cv::Range
{
_Range(const Range &r) : cv::Range(r) {}
_Range(int start, int size = 1) : cv::Range(start, start + size) {}
};
static inline Mat slice(const Mat &m, const _Range &r0)
{
Range ranges[CV_MAX_DIM];
for (int i = 1; i < m.dims; i++)
ranges[i] = Range::all();
ranges[0] = r0;
return m(&ranges[0]);
}
static inline Mat slice(const Mat &m, const _Range &r0, const _Range &r1)
{
CV_Assert(m.dims >= 2);
Range ranges[CV_MAX_DIM];
for (int i = 2; i < m.dims; i++)
ranges[i] = Range::all();
ranges[0] = r0;
ranges[1] = r1;
return m(&ranges[0]);
}
static inline Mat slice(const Mat &m, const _Range &r0, const _Range &r1, const _Range &r2)
{
CV_Assert(m.dims >= 3);
Range ranges[CV_MAX_DIM];
for (int i = 3; i < m.dims; i++)
ranges[i] = Range::all();
ranges[0] = r0;
ranges[1] = r1;
ranges[2] = r2;
return m(&ranges[0]);
}
static inline Mat slice(const Mat &m, const _Range &r0, const _Range &r1, const _Range &r2, const _Range &r3)
{
CV_Assert(m.dims >= 4);
Range ranges[CV_MAX_DIM];
for (int i = 4; i < m.dims; i++)
ranges[i] = Range::all();
ranges[0] = r0;
ranges[1] = r1;
ranges[2] = r2;
ranges[3] = r3;
return m(&ranges[0]);
}
static inline Mat getPlane(const Mat &m, int n, int cn)
{
CV_Assert(m.dims > 2);
Range range[CV_MAX_DIM];
int sz[CV_MAX_DIM];
for(int i = 2; i < m.dims; i++)
{
sz[i-2] = m.size.p[i];
range[i] = Range::all();
}
range[0] = Range(n, n+1);
range[1] = Range(cn, cn+1);
return m(range).reshape(1, m.dims-2, sz);
}
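// Usage sketch (illustrative): slicing a hypothetical 4D blob of size
// 2 x 3 x 32 x 32; _Range(start, size) denotes the half-open range
// [start, start + size):
//
//     Mat first = slice(blob, _Range(0));                // 1 x 3 x 32 x 32 view
//     Mat chans = slice(blob, _Range(0), _Range(1, 2));  // channels 1..2 of image 0
//     Mat plane = getPlane(blob, 0, 1);                  // 32 x 32 plane: image 0, channel 1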
static inline MatShape shape(const int* dims, const int n = 4)
{
MatShape shape;
shape.assign(dims, dims + n);
return shape;
}
static inline MatShape shape(const Mat& mat)
{
return shape(mat.size.p, mat.dims);
}
namespace {inline bool is_neg(int i) { return i < 0; }}
static inline MatShape shape(int a0, int a1=-1, int a2=-1, int a3=-1)
{
int dims[] = {a0, a1, a2, a3};
MatShape s = shape(dims);
s.erase(std::remove_if(s.begin(), s.end(), is_neg), s.end());
return s;
}
static inline int total(const MatShape& shape, int start = -1, int end = -1)
{
if (start == -1) start = 0;
if (end == -1) end = (int)shape.size();
if (shape.empty())
return 0;
int elems = 1;
CV_Assert(start <= (int)shape.size() && end <= (int)shape.size() &&
start <= end);
for(int i = start; i < end; i++)
{
elems *= shape[i];
}
return elems;
}
static inline MatShape concat(const MatShape& a, const MatShape& b)
{
MatShape c = a;
c.insert(c.end(), b.begin(), b.end());
return c;
}
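// Usage sketch (illustrative): negative trailing arguments of the
// shape(a0, a1, a2, a3) overload are dropped, so shape(2, 3) yields a 2D shape:
//
//     MatShape s = shape(2, 3, 4);       // {2, 3, 4}
//     int n = total(s);                  // 24
//     int hw = total(s, 1);              // 12, product over dims [1, 3)
//     MatShape c = concat(s, shape(5));  // {2, 3, 4, 5}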
inline void print(const MatShape& shape, const String& name = "")
{
printf("%s: [", name.c_str());
size_t i, n = shape.size();
for( i = 0; i < n; i++ )
printf(" %d", shape[i]);
printf(" ]\n");
}
inline int clamp(int ax, int dims)
{
return ax < 0 ? ax + dims : ax;
}
inline int clamp(int ax, const MatShape& shape)
{
return clamp(ax, (int)shape.size());
}
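// Example (illustrative): clamp() wraps negative axis indices Python-style,
// e.g. for a 4D shape clamp(-1, 4) == 3 while clamp(2, 4) == 2.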
}
}
#endif

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -0,0 +1,39 @@
#ifdef HAVE_OPENCV_DNN
typedef dnn::DictValue LayerId;
typedef std::vector<dnn::MatShape> vector_MatShape;
typedef std::vector<std::vector<dnn::MatShape> > vector_vector_MatShape;
typedef std::vector<size_t> vector_size_t;
typedef std::vector<std::vector<Mat> > vector_vector_Mat;
template<>
bool pyopencv_to(PyObject *o, dnn::DictValue &dv, const char *name)
{
(void)name;
if (!o || o == Py_None)
return true; //Current state will be used
else if (PyLong_Check(o))
{
dv = dnn::DictValue((int64)PyLong_AsLongLong(o));
return true;
}
else if (PyFloat_Check(o))
{
dv = dnn::DictValue(PyFloat_AS_DOUBLE(o));
return true;
}
else if (PyString_Check(o))
{
dv = dnn::DictValue(String(PyString_AsString(o)));
return true;
}
else
return false;
}
template<>
bool pyopencv_to(PyObject *o, std::vector<Mat> &blobs, const char *name) //required for Layer::blobs RW
{
return pyopencvVecConverter<Mat>::to(o, blobs, ArgInfo(name, false));
}
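// Mapping sketch (assuming the Python 2 binding API used above): a Python int
// becomes DictValue(int64), a float becomes DictValue(double), a str becomes
// DictValue(String), and None leaves the current value unchanged.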
#endif

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -0,0 +1,814 @@
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: graph.proto
#ifndef PROTOBUF_graph_2eproto__INCLUDED
#define PROTOBUF_graph_2eproto__INCLUDED
#include <string>
#include <google/protobuf/stubs/common.h>
#if GOOGLE_PROTOBUF_VERSION < 3001000
#error This file was generated by a newer version of protoc which is
#error incompatible with your Protocol Buffer headers. Please update
#error your headers.
#endif
#if 3001000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION
#error This file was generated by an older version of protoc which is
#error incompatible with your Protocol Buffer headers. Please
#error regenerate this file with a newer version of protoc.
#endif
#include <google/protobuf/arena.h>
#include <google/protobuf/arenastring.h>
#include <google/protobuf/generated_message_util.h>
#include <google/protobuf/metadata.h>
#include <google/protobuf/message.h>
#include <google/protobuf/repeated_field.h>
#include <google/protobuf/extension_set.h>
#include <google/protobuf/map.h>
#include <google/protobuf/map_field_inl.h>
#include <google/protobuf/unknown_field_set.h>
#include "attr_value.pb.h"
#include "function.pb.h"
#include "versions.pb.h"
// @@protoc_insertion_point(includes)
namespace tensorflow {
// Internal implementation detail -- do not call these.
void protobuf_AddDesc_graph_2eproto();
void protobuf_InitDefaults_graph_2eproto();
void protobuf_AssignDesc_graph_2eproto();
void protobuf_ShutdownFile_graph_2eproto();
class GraphDef;
class NodeDef;
// ===================================================================
class GraphDef : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:tensorflow.GraphDef) */ {
public:
GraphDef();
virtual ~GraphDef();
GraphDef(const GraphDef& from);
inline GraphDef& operator=(const GraphDef& from) {
CopyFrom(from);
return *this;
}
inline ::google::protobuf::Arena* GetArena() const { return GetArenaNoVirtual(); }
inline void* GetMaybeArenaPointer() const {
return MaybeArenaPtr();
}
static const ::google::protobuf::Descriptor* descriptor();
static const GraphDef& default_instance();
static const GraphDef* internal_default_instance();
void UnsafeArenaSwap(GraphDef* other);
void Swap(GraphDef* other);
// implements Message ----------------------------------------------
inline GraphDef* New() const { return New(NULL); }
GraphDef* New(::google::protobuf::Arena* arena) const;
void CopyFrom(const ::google::protobuf::Message& from);
void MergeFrom(const ::google::protobuf::Message& from);
void CopyFrom(const GraphDef& from);
void MergeFrom(const GraphDef& from);
void Clear();
bool IsInitialized() const;
size_t ByteSizeLong() const;
bool MergePartialFromCodedStream(
::google::protobuf::io::CodedInputStream* input);
void SerializeWithCachedSizes(
::google::protobuf::io::CodedOutputStream* output) const;
::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(
bool deterministic, ::google::protobuf::uint8* output) const;
::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {
return InternalSerializeWithCachedSizesToArray(false, output);
}
int GetCachedSize() const { return _cached_size_; }
private:
void SharedCtor();
void SharedDtor();
void SetCachedSize(int size) const;
void InternalSwap(GraphDef* other);
void UnsafeMergeFrom(const GraphDef& from);
protected:
explicit GraphDef(::google::protobuf::Arena* arena);
private:
static void ArenaDtor(void* object);
inline void RegisterArenaDtor(::google::protobuf::Arena* arena);
private:
inline ::google::protobuf::Arena* GetArenaNoVirtual() const {
return _internal_metadata_.arena();
}
inline void* MaybeArenaPtr() const {
return _internal_metadata_.raw_arena_ptr();
}
public:
::google::protobuf::Metadata GetMetadata() const;
// nested types ----------------------------------------------------
// accessors -------------------------------------------------------
// repeated .tensorflow.NodeDef node = 1;
int node_size() const;
void clear_node();
static const int kNodeFieldNumber = 1;
const ::tensorflow::NodeDef& node(int index) const;
::tensorflow::NodeDef* mutable_node(int index);
::tensorflow::NodeDef* add_node();
::google::protobuf::RepeatedPtrField< ::tensorflow::NodeDef >*
mutable_node();
const ::google::protobuf::RepeatedPtrField< ::tensorflow::NodeDef >&
node() const;
// optional .tensorflow.VersionDef versions = 4;
bool has_versions() const;
void clear_versions();
static const int kVersionsFieldNumber = 4;
private:
void _slow_mutable_versions();
void _slow_set_allocated_versions(
::google::protobuf::Arena* message_arena, ::tensorflow::VersionDef** versions);
::tensorflow::VersionDef* _slow_release_versions();
public:
const ::tensorflow::VersionDef& versions() const;
::tensorflow::VersionDef* mutable_versions();
::tensorflow::VersionDef* release_versions();
void set_allocated_versions(::tensorflow::VersionDef* versions);
::tensorflow::VersionDef* unsafe_arena_release_versions();
void unsafe_arena_set_allocated_versions(
::tensorflow::VersionDef* versions);
// optional int32 version = 3 [deprecated = true];
GOOGLE_PROTOBUF_DEPRECATED_ATTR void clear_version();
GOOGLE_PROTOBUF_DEPRECATED_ATTR static const int kVersionFieldNumber = 3;
GOOGLE_PROTOBUF_DEPRECATED_ATTR ::google::protobuf::int32 version() const;
GOOGLE_PROTOBUF_DEPRECATED_ATTR void set_version(::google::protobuf::int32 value);
// optional .tensorflow.FunctionDefLibrary library = 2;
bool has_library() const;
void clear_library();
static const int kLibraryFieldNumber = 2;
private:
void _slow_mutable_library();
void _slow_set_allocated_library(
::google::protobuf::Arena* message_arena, ::tensorflow::FunctionDefLibrary** library);
::tensorflow::FunctionDefLibrary* _slow_release_library();
public:
const ::tensorflow::FunctionDefLibrary& library() const;
::tensorflow::FunctionDefLibrary* mutable_library();
::tensorflow::FunctionDefLibrary* release_library();
void set_allocated_library(::tensorflow::FunctionDefLibrary* library);
::tensorflow::FunctionDefLibrary* unsafe_arena_release_library();
void unsafe_arena_set_allocated_library(
::tensorflow::FunctionDefLibrary* library);
// @@protoc_insertion_point(class_scope:tensorflow.GraphDef)
private:
::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;
friend class ::google::protobuf::Arena;
typedef void InternalArenaConstructable_;
typedef void DestructorSkippable_;
::google::protobuf::RepeatedPtrField< ::tensorflow::NodeDef > node_;
::tensorflow::VersionDef* versions_;
::tensorflow::FunctionDefLibrary* library_;
::google::protobuf::int32 version_;
mutable int _cached_size_;
friend void protobuf_InitDefaults_graph_2eproto_impl();
friend void protobuf_AddDesc_graph_2eproto_impl();
friend void protobuf_AssignDesc_graph_2eproto();
friend void protobuf_ShutdownFile_graph_2eproto();
void InitAsDefaultInstance();
};
extern ::google::protobuf::internal::ExplicitlyConstructed<GraphDef> GraphDef_default_instance_;
// -------------------------------------------------------------------
class NodeDef : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:tensorflow.NodeDef) */ {
public:
NodeDef();
virtual ~NodeDef();
NodeDef(const NodeDef& from);
inline NodeDef& operator=(const NodeDef& from) {
CopyFrom(from);
return *this;
}
inline ::google::protobuf::Arena* GetArena() const { return GetArenaNoVirtual(); }
inline void* GetMaybeArenaPointer() const {
return MaybeArenaPtr();
}
static const ::google::protobuf::Descriptor* descriptor();
static const NodeDef& default_instance();
static const NodeDef* internal_default_instance();
void UnsafeArenaSwap(NodeDef* other);
void Swap(NodeDef* other);
// implements Message ----------------------------------------------
inline NodeDef* New() const { return New(NULL); }
NodeDef* New(::google::protobuf::Arena* arena) const;
void CopyFrom(const ::google::protobuf::Message& from);
void MergeFrom(const ::google::protobuf::Message& from);
void CopyFrom(const NodeDef& from);
void MergeFrom(const NodeDef& from);
void Clear();
bool IsInitialized() const;
size_t ByteSizeLong() const;
bool MergePartialFromCodedStream(
::google::protobuf::io::CodedInputStream* input);
void SerializeWithCachedSizes(
::google::protobuf::io::CodedOutputStream* output) const;
::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(
bool deterministic, ::google::protobuf::uint8* output) const;
::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {
return InternalSerializeWithCachedSizesToArray(false, output);
}
int GetCachedSize() const { return _cached_size_; }
private:
void SharedCtor();
void SharedDtor();
void SetCachedSize(int size) const;
void InternalSwap(NodeDef* other);
void UnsafeMergeFrom(const NodeDef& from);
protected:
explicit NodeDef(::google::protobuf::Arena* arena);
private:
static void ArenaDtor(void* object);
inline void RegisterArenaDtor(::google::protobuf::Arena* arena);
private:
inline ::google::protobuf::Arena* GetArenaNoVirtual() const {
return _internal_metadata_.arena();
}
inline void* MaybeArenaPtr() const {
return _internal_metadata_.raw_arena_ptr();
}
public:
::google::protobuf::Metadata GetMetadata() const;
// nested types ----------------------------------------------------
// accessors -------------------------------------------------------
// optional string name = 1;
void clear_name();
static const int kNameFieldNumber = 1;
const ::std::string& name() const;
void set_name(const ::std::string& value);
void set_name(const char* value);
void set_name(const char* value, size_t size);
::std::string* mutable_name();
::std::string* release_name();
void set_allocated_name(::std::string* name);
::std::string* unsafe_arena_release_name();
void unsafe_arena_set_allocated_name(
::std::string* name);
// optional string op = 2;
void clear_op();
static const int kOpFieldNumber = 2;
const ::std::string& op() const;
void set_op(const ::std::string& value);
void set_op(const char* value);
void set_op(const char* value, size_t size);
::std::string* mutable_op();
::std::string* release_op();
void set_allocated_op(::std::string* op);
::std::string* unsafe_arena_release_op();
void unsafe_arena_set_allocated_op(
::std::string* op);
// repeated string input = 3;
int input_size() const;
void clear_input();
static const int kInputFieldNumber = 3;
const ::std::string& input(int index) const;
::std::string* mutable_input(int index);
void set_input(int index, const ::std::string& value);
void set_input(int index, const char* value);
void set_input(int index, const char* value, size_t size);
::std::string* add_input();
void add_input(const ::std::string& value);
void add_input(const char* value);
void add_input(const char* value, size_t size);
const ::google::protobuf::RepeatedPtrField< ::std::string>& input() const;
::google::protobuf::RepeatedPtrField< ::std::string>* mutable_input();
// optional string device = 4;
void clear_device();
static const int kDeviceFieldNumber = 4;
const ::std::string& device() const;
void set_device(const ::std::string& value);
void set_device(const char* value);
void set_device(const char* value, size_t size);
::std::string* mutable_device();
::std::string* release_device();
void set_allocated_device(::std::string* device);
::std::string* unsafe_arena_release_device();
void unsafe_arena_set_allocated_device(
::std::string* device);
// map<string, .tensorflow.AttrValue> attr = 5;
int attr_size() const;
void clear_attr();
static const int kAttrFieldNumber = 5;
const ::google::protobuf::Map< ::std::string, ::tensorflow::AttrValue >&
attr() const;
::google::protobuf::Map< ::std::string, ::tensorflow::AttrValue >*
mutable_attr();
// @@protoc_insertion_point(class_scope:tensorflow.NodeDef)
private:
::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;
friend class ::google::protobuf::Arena;
typedef void InternalArenaConstructable_;
typedef void DestructorSkippable_;
::google::protobuf::RepeatedPtrField< ::std::string> input_;
typedef ::google::protobuf::internal::MapEntryLite<
::std::string, ::tensorflow::AttrValue,
::google::protobuf::internal::WireFormatLite::TYPE_STRING,
::google::protobuf::internal::WireFormatLite::TYPE_MESSAGE,
0 >
NodeDef_AttrEntry;
::google::protobuf::internal::MapField<
::std::string, ::tensorflow::AttrValue,
::google::protobuf::internal::WireFormatLite::TYPE_STRING,
::google::protobuf::internal::WireFormatLite::TYPE_MESSAGE,
0 > attr_;
::google::protobuf::internal::ArenaStringPtr name_;
::google::protobuf::internal::ArenaStringPtr op_;
::google::protobuf::internal::ArenaStringPtr device_;
mutable int _cached_size_;
friend void protobuf_InitDefaults_graph_2eproto_impl();
friend void protobuf_AddDesc_graph_2eproto_impl();
friend void protobuf_AssignDesc_graph_2eproto();
friend void protobuf_ShutdownFile_graph_2eproto();
void InitAsDefaultInstance();
};
extern ::google::protobuf::internal::ExplicitlyConstructed<NodeDef> NodeDef_default_instance_;
// ===================================================================
// ===================================================================
#if !PROTOBUF_INLINE_NOT_IN_HEADERS
// GraphDef
// repeated .tensorflow.NodeDef node = 1;
inline int GraphDef::node_size() const {
return node_.size();
}
inline void GraphDef::clear_node() {
node_.Clear();
}
inline const ::tensorflow::NodeDef& GraphDef::node(int index) const {
// @@protoc_insertion_point(field_get:tensorflow.GraphDef.node)
return node_.Get(index);
}
inline ::tensorflow::NodeDef* GraphDef::mutable_node(int index) {
// @@protoc_insertion_point(field_mutable:tensorflow.GraphDef.node)
return node_.Mutable(index);
}
inline ::tensorflow::NodeDef* GraphDef::add_node() {
// @@protoc_insertion_point(field_add:tensorflow.GraphDef.node)
return node_.Add();
}
inline ::google::protobuf::RepeatedPtrField< ::tensorflow::NodeDef >*
GraphDef::mutable_node() {
// @@protoc_insertion_point(field_mutable_list:tensorflow.GraphDef.node)
return &node_;
}
inline const ::google::protobuf::RepeatedPtrField< ::tensorflow::NodeDef >&
GraphDef::node() const {
// @@protoc_insertion_point(field_list:tensorflow.GraphDef.node)
return node_;
}
// optional .tensorflow.VersionDef versions = 4;
inline bool GraphDef::has_versions() const {
return this != internal_default_instance() && versions_ != NULL;
}
inline void GraphDef::clear_versions() {
if (GetArenaNoVirtual() == NULL && versions_ != NULL) delete versions_;
versions_ = NULL;
}
inline const ::tensorflow::VersionDef& GraphDef::versions() const {
// @@protoc_insertion_point(field_get:tensorflow.GraphDef.versions)
return versions_ != NULL ? *versions_
: *::tensorflow::VersionDef::internal_default_instance();
}
inline ::tensorflow::VersionDef* GraphDef::mutable_versions() {
if (versions_ == NULL) {
_slow_mutable_versions();
}
// @@protoc_insertion_point(field_mutable:tensorflow.GraphDef.versions)
return versions_;
}
inline ::tensorflow::VersionDef* GraphDef::release_versions() {
// @@protoc_insertion_point(field_release:tensorflow.GraphDef.versions)
if (GetArenaNoVirtual() != NULL) {
return _slow_release_versions();
} else {
::tensorflow::VersionDef* temp = versions_;
versions_ = NULL;
return temp;
}
}
inline void GraphDef::set_allocated_versions(::tensorflow::VersionDef* versions) {
::google::protobuf::Arena* message_arena = GetArenaNoVirtual();
if (message_arena == NULL) {
delete versions_;
}
if (versions != NULL) {
_slow_set_allocated_versions(message_arena, &versions);
}
versions_ = versions;
if (versions) {
} else {
}
// @@protoc_insertion_point(field_set_allocated:tensorflow.GraphDef.versions)
}
// optional int32 version = 3 [deprecated = true];
inline void GraphDef::clear_version() {
version_ = 0;
}
inline ::google::protobuf::int32 GraphDef::version() const {
// @@protoc_insertion_point(field_get:tensorflow.GraphDef.version)
return version_;
}
inline void GraphDef::set_version(::google::protobuf::int32 value) {
version_ = value;
// @@protoc_insertion_point(field_set:tensorflow.GraphDef.version)
}
// optional .tensorflow.FunctionDefLibrary library = 2;
inline bool GraphDef::has_library() const {
return this != internal_default_instance() && library_ != NULL;
}
inline void GraphDef::clear_library() {
if (GetArenaNoVirtual() == NULL && library_ != NULL) delete library_;
library_ = NULL;
}
inline const ::tensorflow::FunctionDefLibrary& GraphDef::library() const {
// @@protoc_insertion_point(field_get:tensorflow.GraphDef.library)
return library_ != NULL ? *library_
: *::tensorflow::FunctionDefLibrary::internal_default_instance();
}
inline ::tensorflow::FunctionDefLibrary* GraphDef::mutable_library() {
if (library_ == NULL) {
_slow_mutable_library();
}
// @@protoc_insertion_point(field_mutable:tensorflow.GraphDef.library)
return library_;
}
inline ::tensorflow::FunctionDefLibrary* GraphDef::release_library() {
// @@protoc_insertion_point(field_release:tensorflow.GraphDef.library)
if (GetArenaNoVirtual() != NULL) {
return _slow_release_library();
} else {
::tensorflow::FunctionDefLibrary* temp = library_;
library_ = NULL;
return temp;
}
}
inline void GraphDef::set_allocated_library(::tensorflow::FunctionDefLibrary* library) {
::google::protobuf::Arena* message_arena = GetArenaNoVirtual();
if (message_arena == NULL) {
delete library_;
}
if (library != NULL) {
_slow_set_allocated_library(message_arena, &library);
}
library_ = library;
if (library) {
} else {
}
// @@protoc_insertion_point(field_set_allocated:tensorflow.GraphDef.library)
}
inline const GraphDef* GraphDef::internal_default_instance() {
return &GraphDef_default_instance_.get();
}
// -------------------------------------------------------------------
// NodeDef
// optional string name = 1;
inline void NodeDef::clear_name() {
name_.ClearToEmpty(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());
}
inline const ::std::string& NodeDef::name() const {
// @@protoc_insertion_point(field_get:tensorflow.NodeDef.name)
return name_.Get(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
}
inline void NodeDef::set_name(const ::std::string& value) {
name_.Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value, GetArenaNoVirtual());
// @@protoc_insertion_point(field_set:tensorflow.NodeDef.name)
}
inline void NodeDef::set_name(const char* value) {
name_.Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value),
GetArenaNoVirtual());
// @@protoc_insertion_point(field_set_char:tensorflow.NodeDef.name)
}
inline void NodeDef::set_name(const char* value,
size_t size) {
name_.Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(
reinterpret_cast<const char*>(value), size), GetArenaNoVirtual());
// @@protoc_insertion_point(field_set_pointer:tensorflow.NodeDef.name)
}
inline ::std::string* NodeDef::mutable_name() {
// @@protoc_insertion_point(field_mutable:tensorflow.NodeDef.name)
return name_.Mutable(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());
}
inline ::std::string* NodeDef::release_name() {
// @@protoc_insertion_point(field_release:tensorflow.NodeDef.name)
return name_.Release(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());
}
inline ::std::string* NodeDef::unsafe_arena_release_name() {
// @@protoc_insertion_point(field_unsafe_arena_release:tensorflow.NodeDef.name)
GOOGLE_DCHECK(GetArenaNoVirtual() != NULL);
return name_.UnsafeArenaRelease(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
GetArenaNoVirtual());
}
inline void NodeDef::set_allocated_name(::std::string* name) {
if (name != NULL) {
} else {
}
name_.SetAllocated(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), name,
GetArenaNoVirtual());
// @@protoc_insertion_point(field_set_allocated:tensorflow.NodeDef.name)
}
inline void NodeDef::unsafe_arena_set_allocated_name(
::std::string* name) {
GOOGLE_DCHECK(GetArenaNoVirtual() != NULL);
if (name != NULL) {
} else {
}
name_.UnsafeArenaSetAllocated(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
name, GetArenaNoVirtual());
// @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.NodeDef.name)
}
// optional string op = 2;
inline void NodeDef::clear_op() {
op_.ClearToEmpty(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());
}
inline const ::std::string& NodeDef::op() const {
// @@protoc_insertion_point(field_get:tensorflow.NodeDef.op)
return op_.Get(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
}
inline void NodeDef::set_op(const ::std::string& value) {
op_.Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value, GetArenaNoVirtual());
// @@protoc_insertion_point(field_set:tensorflow.NodeDef.op)
}
inline void NodeDef::set_op(const char* value) {
op_.Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value),
GetArenaNoVirtual());
// @@protoc_insertion_point(field_set_char:tensorflow.NodeDef.op)
}
inline void NodeDef::set_op(const char* value,
size_t size) {
op_.Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(
reinterpret_cast<const char*>(value), size), GetArenaNoVirtual());
// @@protoc_insertion_point(field_set_pointer:tensorflow.NodeDef.op)
}
inline ::std::string* NodeDef::mutable_op() {
// @@protoc_insertion_point(field_mutable:tensorflow.NodeDef.op)
return op_.Mutable(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());
}
inline ::std::string* NodeDef::release_op() {
// @@protoc_insertion_point(field_release:tensorflow.NodeDef.op)
return op_.Release(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());
}
inline ::std::string* NodeDef::unsafe_arena_release_op() {
// @@protoc_insertion_point(field_unsafe_arena_release:tensorflow.NodeDef.op)
GOOGLE_DCHECK(GetArenaNoVirtual() != NULL);
return op_.UnsafeArenaRelease(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
GetArenaNoVirtual());
}
inline void NodeDef::set_allocated_op(::std::string* op) {
if (op != NULL) {
} else {
}
op_.SetAllocated(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), op,
GetArenaNoVirtual());
// @@protoc_insertion_point(field_set_allocated:tensorflow.NodeDef.op)
}
inline void NodeDef::unsafe_arena_set_allocated_op(
::std::string* op) {
GOOGLE_DCHECK(GetArenaNoVirtual() != NULL);
if (op != NULL) {
} else {
}
op_.UnsafeArenaSetAllocated(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
op, GetArenaNoVirtual());
// @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.NodeDef.op)
}
// repeated string input = 3;
inline int NodeDef::input_size() const {
return input_.size();
}
inline void NodeDef::clear_input() {
input_.Clear();
}
inline const ::std::string& NodeDef::input(int index) const {
// @@protoc_insertion_point(field_get:tensorflow.NodeDef.input)
return input_.Get(index);
}
inline ::std::string* NodeDef::mutable_input(int index) {
// @@protoc_insertion_point(field_mutable:tensorflow.NodeDef.input)
return input_.Mutable(index);
}
inline void NodeDef::set_input(int index, const ::std::string& value) {
// @@protoc_insertion_point(field_set:tensorflow.NodeDef.input)
input_.Mutable(index)->assign(value);
}
inline void NodeDef::set_input(int index, const char* value) {
input_.Mutable(index)->assign(value);
// @@protoc_insertion_point(field_set_char:tensorflow.NodeDef.input)
}
inline void NodeDef::set_input(int index, const char* value, size_t size) {
input_.Mutable(index)->assign(
reinterpret_cast<const char*>(value), size);
// @@protoc_insertion_point(field_set_pointer:tensorflow.NodeDef.input)
}
inline ::std::string* NodeDef::add_input() {
// @@protoc_insertion_point(field_add_mutable:tensorflow.NodeDef.input)
return input_.Add();
}
inline void NodeDef::add_input(const ::std::string& value) {
input_.Add()->assign(value);
// @@protoc_insertion_point(field_add:tensorflow.NodeDef.input)
}
inline void NodeDef::add_input(const char* value) {
input_.Add()->assign(value);
// @@protoc_insertion_point(field_add_char:tensorflow.NodeDef.input)
}
inline void NodeDef::add_input(const char* value, size_t size) {
input_.Add()->assign(reinterpret_cast<const char*>(value), size);
// @@protoc_insertion_point(field_add_pointer:tensorflow.NodeDef.input)
}
inline const ::google::protobuf::RepeatedPtrField< ::std::string>&
NodeDef::input() const {
// @@protoc_insertion_point(field_list:tensorflow.NodeDef.input)
return input_;
}
inline ::google::protobuf::RepeatedPtrField< ::std::string>*
NodeDef::mutable_input() {
// @@protoc_insertion_point(field_mutable_list:tensorflow.NodeDef.input)
return &input_;
}
// optional string device = 4;
inline void NodeDef::clear_device() {
device_.ClearToEmpty(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());
}
inline const ::std::string& NodeDef::device() const {
// @@protoc_insertion_point(field_get:tensorflow.NodeDef.device)
return device_.Get(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
}
inline void NodeDef::set_device(const ::std::string& value) {
device_.Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value, GetArenaNoVirtual());
// @@protoc_insertion_point(field_set:tensorflow.NodeDef.device)
}
inline void NodeDef::set_device(const char* value) {
device_.Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value),
GetArenaNoVirtual());
// @@protoc_insertion_point(field_set_char:tensorflow.NodeDef.device)
}
inline void NodeDef::set_device(const char* value,
size_t size) {
device_.Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(
reinterpret_cast<const char*>(value), size), GetArenaNoVirtual());
// @@protoc_insertion_point(field_set_pointer:tensorflow.NodeDef.device)
}
inline ::std::string* NodeDef::mutable_device() {
// @@protoc_insertion_point(field_mutable:tensorflow.NodeDef.device)
return device_.Mutable(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());
}
inline ::std::string* NodeDef::release_device() {
// @@protoc_insertion_point(field_release:tensorflow.NodeDef.device)
return device_.Release(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());
}
inline ::std::string* NodeDef::unsafe_arena_release_device() {
// @@protoc_insertion_point(field_unsafe_arena_release:tensorflow.NodeDef.device)
GOOGLE_DCHECK(GetArenaNoVirtual() != NULL);
return device_.UnsafeArenaRelease(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
GetArenaNoVirtual());
}
inline void NodeDef::set_allocated_device(::std::string* device) {
if (device != NULL) {
} else {
}
device_.SetAllocated(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), device,
GetArenaNoVirtual());
// @@protoc_insertion_point(field_set_allocated:tensorflow.NodeDef.device)
}
inline void NodeDef::unsafe_arena_set_allocated_device(
::std::string* device) {
GOOGLE_DCHECK(GetArenaNoVirtual() != NULL);
if (device != NULL) {
} else {
}
device_.UnsafeArenaSetAllocated(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
device, GetArenaNoVirtual());
// @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.NodeDef.device)
}
// map<string, .tensorflow.AttrValue> attr = 5;
inline int NodeDef::attr_size() const {
return attr_.size();
}
inline void NodeDef::clear_attr() {
attr_.Clear();
}
inline const ::google::protobuf::Map< ::std::string, ::tensorflow::AttrValue >&
NodeDef::attr() const {
// @@protoc_insertion_point(field_map:tensorflow.NodeDef.attr)
return attr_.GetMap();
}
inline ::google::protobuf::Map< ::std::string, ::tensorflow::AttrValue >*
NodeDef::mutable_attr() {
// @@protoc_insertion_point(field_mutable_map:tensorflow.NodeDef.attr)
return attr_.MutableMap();
}
inline const NodeDef* NodeDef::internal_default_instance() {
return &NodeDef_default_instance_.get();
}
#endif // !PROTOBUF_INLINE_NOT_IN_HEADERS
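// Usage sketch (illustrative comment, not produced by protoc): iterating over a
// parsed graph --
//
//     tensorflow::GraphDef graph;
//     // ... populate it, e.g. graph.ParseFromString(serialized_bytes) ...
//     for (int i = 0; i < graph.node_size(); i++) {
//         const tensorflow::NodeDef& node = graph.node(i);
//         // node.name(), node.op(), node.input(j) and node.attr() are available
//     }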
// -------------------------------------------------------------------
// @@protoc_insertion_point(namespace_scope)
} // namespace tensorflow
// @@protoc_insertion_point(global_scope)
#endif // PROTOBUF_graph_2eproto__INCLUDED

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -0,0 +1,770 @@
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: tensor.proto
#ifndef PROTOBUF_tensor_2eproto__INCLUDED
#define PROTOBUF_tensor_2eproto__INCLUDED
#include <string>
#include <google/protobuf/stubs/common.h>
#if GOOGLE_PROTOBUF_VERSION < 3001000
#error This file was generated by a newer version of protoc which is
#error incompatible with your Protocol Buffer headers. Please update
#error your headers.
#endif
#if 3001000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION
#error This file was generated by an older version of protoc which is
#error incompatible with your Protocol Buffer headers. Please
#error regenerate this file with a newer version of protoc.
#endif
#include <google/protobuf/arena.h>
#include <google/protobuf/arenastring.h>
#include <google/protobuf/generated_message_util.h>
#include <google/protobuf/metadata.h>
#include <google/protobuf/message.h>
#include <google/protobuf/repeated_field.h>
#include <google/protobuf/extension_set.h>
#include <google/protobuf/unknown_field_set.h>
#include "tensor_shape.pb.h"
#include "types.pb.h"
// @@protoc_insertion_point(includes)
namespace tensorflow {
// Internal implementation detail -- do not call these.
void protobuf_AddDesc_tensor_2eproto();
void protobuf_InitDefaults_tensor_2eproto();
void protobuf_AssignDesc_tensor_2eproto();
void protobuf_ShutdownFile_tensor_2eproto();
class TensorProto;
// ===================================================================
class TensorProto : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:tensorflow.TensorProto) */ {
public:
TensorProto();
virtual ~TensorProto();
TensorProto(const TensorProto& from);
inline TensorProto& operator=(const TensorProto& from) {
CopyFrom(from);
return *this;
}
inline ::google::protobuf::Arena* GetArena() const { return GetArenaNoVirtual(); }
inline void* GetMaybeArenaPointer() const {
return MaybeArenaPtr();
}
static const ::google::protobuf::Descriptor* descriptor();
static const TensorProto& default_instance();
static const TensorProto* internal_default_instance();
void UnsafeArenaSwap(TensorProto* other);
void Swap(TensorProto* other);
// implements Message ----------------------------------------------
inline TensorProto* New() const { return New(NULL); }
TensorProto* New(::google::protobuf::Arena* arena) const;
void CopyFrom(const ::google::protobuf::Message& from);
void MergeFrom(const ::google::protobuf::Message& from);
void CopyFrom(const TensorProto& from);
void MergeFrom(const TensorProto& from);
void Clear();
bool IsInitialized() const;
size_t ByteSizeLong() const;
bool MergePartialFromCodedStream(
::google::protobuf::io::CodedInputStream* input);
void SerializeWithCachedSizes(
::google::protobuf::io::CodedOutputStream* output) const;
::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(
bool deterministic, ::google::protobuf::uint8* output) const;
::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {
return InternalSerializeWithCachedSizesToArray(false, output);
}
int GetCachedSize() const { return _cached_size_; }
private:
void SharedCtor();
void SharedDtor();
void SetCachedSize(int size) const;
void InternalSwap(TensorProto* other);
void UnsafeMergeFrom(const TensorProto& from);
protected:
explicit TensorProto(::google::protobuf::Arena* arena);
private:
static void ArenaDtor(void* object);
inline void RegisterArenaDtor(::google::protobuf::Arena* arena);
private:
inline ::google::protobuf::Arena* GetArenaNoVirtual() const {
return _internal_metadata_.arena();
}
inline void* MaybeArenaPtr() const {
return _internal_metadata_.raw_arena_ptr();
}
public:
::google::protobuf::Metadata GetMetadata() const;
// nested types ----------------------------------------------------
// accessors -------------------------------------------------------
// optional .tensorflow.DataType dtype = 1;
void clear_dtype();
static const int kDtypeFieldNumber = 1;
::tensorflow::DataType dtype() const;
void set_dtype(::tensorflow::DataType value);
// optional .tensorflow.TensorShapeProto tensor_shape = 2;
bool has_tensor_shape() const;
void clear_tensor_shape();
static const int kTensorShapeFieldNumber = 2;
private:
void _slow_mutable_tensor_shape();
void _slow_set_allocated_tensor_shape(
::google::protobuf::Arena* message_arena, ::tensorflow::TensorShapeProto** tensor_shape);
::tensorflow::TensorShapeProto* _slow_release_tensor_shape();
public:
const ::tensorflow::TensorShapeProto& tensor_shape() const;
::tensorflow::TensorShapeProto* mutable_tensor_shape();
::tensorflow::TensorShapeProto* release_tensor_shape();
void set_allocated_tensor_shape(::tensorflow::TensorShapeProto* tensor_shape);
::tensorflow::TensorShapeProto* unsafe_arena_release_tensor_shape();
void unsafe_arena_set_allocated_tensor_shape(
::tensorflow::TensorShapeProto* tensor_shape);
// optional int32 version_number = 3;
void clear_version_number();
static const int kVersionNumberFieldNumber = 3;
::google::protobuf::int32 version_number() const;
void set_version_number(::google::protobuf::int32 value);
// optional bytes tensor_content = 4;
void clear_tensor_content();
static const int kTensorContentFieldNumber = 4;
const ::std::string& tensor_content() const;
void set_tensor_content(const ::std::string& value);
void set_tensor_content(const char* value);
void set_tensor_content(const void* value, size_t size);
::std::string* mutable_tensor_content();
::std::string* release_tensor_content();
void set_allocated_tensor_content(::std::string* tensor_content);
::std::string* unsafe_arena_release_tensor_content();
void unsafe_arena_set_allocated_tensor_content(
::std::string* tensor_content);
// repeated int32 half_val = 13 [packed = true];
int half_val_size() const;
void clear_half_val();
static const int kHalfValFieldNumber = 13;
::google::protobuf::int32 half_val(int index) const;
void set_half_val(int index, ::google::protobuf::int32 value);
void add_half_val(::google::protobuf::int32 value);
const ::google::protobuf::RepeatedField< ::google::protobuf::int32 >&
half_val() const;
::google::protobuf::RepeatedField< ::google::protobuf::int32 >*
mutable_half_val();
// repeated float float_val = 5 [packed = true];
int float_val_size() const;
void clear_float_val();
static const int kFloatValFieldNumber = 5;
float float_val(int index) const;
void set_float_val(int index, float value);
void add_float_val(float value);
const ::google::protobuf::RepeatedField< float >&
float_val() const;
::google::protobuf::RepeatedField< float >*
mutable_float_val();
// repeated double double_val = 6 [packed = true];
int double_val_size() const;
void clear_double_val();
static const int kDoubleValFieldNumber = 6;
double double_val(int index) const;
void set_double_val(int index, double value);
void add_double_val(double value);
const ::google::protobuf::RepeatedField< double >&
double_val() const;
::google::protobuf::RepeatedField< double >*
mutable_double_val();
// repeated int32 int_val = 7 [packed = true];
int int_val_size() const;
void clear_int_val();
static const int kIntValFieldNumber = 7;
::google::protobuf::int32 int_val(int index) const;
void set_int_val(int index, ::google::protobuf::int32 value);
void add_int_val(::google::protobuf::int32 value);
const ::google::protobuf::RepeatedField< ::google::protobuf::int32 >&
int_val() const;
::google::protobuf::RepeatedField< ::google::protobuf::int32 >*
mutable_int_val();
// repeated bytes string_val = 8;
int string_val_size() const;
void clear_string_val();
static const int kStringValFieldNumber = 8;
const ::std::string& string_val(int index) const;
::std::string* mutable_string_val(int index);
void set_string_val(int index, const ::std::string& value);
void set_string_val(int index, const char* value);
void set_string_val(int index, const void* value, size_t size);
::std::string* add_string_val();
void add_string_val(const ::std::string& value);
void add_string_val(const char* value);
void add_string_val(const void* value, size_t size);
const ::google::protobuf::RepeatedPtrField< ::std::string>& string_val() const;
::google::protobuf::RepeatedPtrField< ::std::string>* mutable_string_val();
// repeated float scomplex_val = 9 [packed = true];
int scomplex_val_size() const;
void clear_scomplex_val();
static const int kScomplexValFieldNumber = 9;
float scomplex_val(int index) const;
void set_scomplex_val(int index, float value);
void add_scomplex_val(float value);
const ::google::protobuf::RepeatedField< float >&
scomplex_val() const;
::google::protobuf::RepeatedField< float >*
mutable_scomplex_val();
// repeated int64 int64_val = 10 [packed = true];
int int64_val_size() const;
void clear_int64_val();
static const int kInt64ValFieldNumber = 10;
::google::protobuf::int64 int64_val(int index) const;
void set_int64_val(int index, ::google::protobuf::int64 value);
void add_int64_val(::google::protobuf::int64 value);
const ::google::protobuf::RepeatedField< ::google::protobuf::int64 >&
int64_val() const;
::google::protobuf::RepeatedField< ::google::protobuf::int64 >*
mutable_int64_val();
// repeated bool bool_val = 11 [packed = true];
int bool_val_size() const;
void clear_bool_val();
static const int kBoolValFieldNumber = 11;
bool bool_val(int index) const;
void set_bool_val(int index, bool value);
void add_bool_val(bool value);
const ::google::protobuf::RepeatedField< bool >&
bool_val() const;
::google::protobuf::RepeatedField< bool >*
mutable_bool_val();
// repeated double dcomplex_val = 12 [packed = true];
int dcomplex_val_size() const;
void clear_dcomplex_val();
static const int kDcomplexValFieldNumber = 12;
double dcomplex_val(int index) const;
void set_dcomplex_val(int index, double value);
void add_dcomplex_val(double value);
const ::google::protobuf::RepeatedField< double >&
dcomplex_val() const;
::google::protobuf::RepeatedField< double >*
mutable_dcomplex_val();
// @@protoc_insertion_point(class_scope:tensorflow.TensorProto)
private:
::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;
friend class ::google::protobuf::Arena;
typedef void InternalArenaConstructable_;
typedef void DestructorSkippable_;
::google::protobuf::RepeatedField< ::google::protobuf::int32 > half_val_;
mutable int _half_val_cached_byte_size_;
::google::protobuf::RepeatedField< float > float_val_;
mutable int _float_val_cached_byte_size_;
::google::protobuf::RepeatedField< double > double_val_;
mutable int _double_val_cached_byte_size_;
::google::protobuf::RepeatedField< ::google::protobuf::int32 > int_val_;
mutable int _int_val_cached_byte_size_;
::google::protobuf::RepeatedPtrField< ::std::string> string_val_;
::google::protobuf::RepeatedField< float > scomplex_val_;
mutable int _scomplex_val_cached_byte_size_;
::google::protobuf::RepeatedField< ::google::protobuf::int64 > int64_val_;
mutable int _int64_val_cached_byte_size_;
::google::protobuf::RepeatedField< bool > bool_val_;
mutable int _bool_val_cached_byte_size_;
::google::protobuf::RepeatedField< double > dcomplex_val_;
mutable int _dcomplex_val_cached_byte_size_;
::google::protobuf::internal::ArenaStringPtr tensor_content_;
::tensorflow::TensorShapeProto* tensor_shape_;
int dtype_;
::google::protobuf::int32 version_number_;
mutable int _cached_size_;
friend void protobuf_InitDefaults_tensor_2eproto_impl();
friend void protobuf_AddDesc_tensor_2eproto_impl();
friend void protobuf_AssignDesc_tensor_2eproto();
friend void protobuf_ShutdownFile_tensor_2eproto();
void InitAsDefaultInstance();
};
extern ::google::protobuf::internal::ExplicitlyConstructed<TensorProto> TensorProto_default_instance_;
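// Usage sketch (illustrative comment, not produced by protoc): tensor data lives
// either packed in tensor_content() or in the typed *_val arrays --
//
//     const tensorflow::TensorProto& t = someAttr.tensor();  // hypothetical source
//     if (!t.tensor_content().empty()) {
//         const std::string& raw = t.tensor_content();  // layout given by dtype()/tensor_shape()
//     } else {
//         for (int i = 0; i < t.float_val_size(); i++) { /* t.float_val(i) */ }
//     }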
// ===================================================================
// ===================================================================
#if !PROTOBUF_INLINE_NOT_IN_HEADERS
// TensorProto
// optional .tensorflow.DataType dtype = 1;
inline void TensorProto::clear_dtype() {
dtype_ = 0;
}
inline ::tensorflow::DataType TensorProto::dtype() const {
// @@protoc_insertion_point(field_get:tensorflow.TensorProto.dtype)
return static_cast< ::tensorflow::DataType >(dtype_);
}
inline void TensorProto::set_dtype(::tensorflow::DataType value) {
dtype_ = value;
// @@protoc_insertion_point(field_set:tensorflow.TensorProto.dtype)
}
// optional .tensorflow.TensorShapeProto tensor_shape = 2;
inline bool TensorProto::has_tensor_shape() const {
return this != internal_default_instance() && tensor_shape_ != NULL;
}
inline void TensorProto::clear_tensor_shape() {
if (GetArenaNoVirtual() == NULL && tensor_shape_ != NULL) delete tensor_shape_;
tensor_shape_ = NULL;
}
inline const ::tensorflow::TensorShapeProto& TensorProto::tensor_shape() const {
// @@protoc_insertion_point(field_get:tensorflow.TensorProto.tensor_shape)
return tensor_shape_ != NULL ? *tensor_shape_
: *::tensorflow::TensorShapeProto::internal_default_instance();
}
inline ::tensorflow::TensorShapeProto* TensorProto::mutable_tensor_shape() {
if (tensor_shape_ == NULL) {
_slow_mutable_tensor_shape();
}
// @@protoc_insertion_point(field_mutable:tensorflow.TensorProto.tensor_shape)
return tensor_shape_;
}
inline ::tensorflow::TensorShapeProto* TensorProto::release_tensor_shape() {
// @@protoc_insertion_point(field_release:tensorflow.TensorProto.tensor_shape)
if (GetArenaNoVirtual() != NULL) {
return _slow_release_tensor_shape();
} else {
::tensorflow::TensorShapeProto* temp = tensor_shape_;
tensor_shape_ = NULL;
return temp;
}
}
inline void TensorProto::set_allocated_tensor_shape(::tensorflow::TensorShapeProto* tensor_shape) {
::google::protobuf::Arena* message_arena = GetArenaNoVirtual();
if (message_arena == NULL) {
delete tensor_shape_;
}
if (tensor_shape != NULL) {
_slow_set_allocated_tensor_shape(message_arena, &tensor_shape);
}
tensor_shape_ = tensor_shape;
if (tensor_shape) {
} else {
}
// @@protoc_insertion_point(field_set_allocated:tensorflow.TensorProto.tensor_shape)
}
// optional int32 version_number = 3;
inline void TensorProto::clear_version_number() {
version_number_ = 0;
}
inline ::google::protobuf::int32 TensorProto::version_number() const {
// @@protoc_insertion_point(field_get:tensorflow.TensorProto.version_number)
return version_number_;
}
inline void TensorProto::set_version_number(::google::protobuf::int32 value) {
version_number_ = value;
// @@protoc_insertion_point(field_set:tensorflow.TensorProto.version_number)
}
// optional bytes tensor_content = 4;
inline void TensorProto::clear_tensor_content() {
tensor_content_.ClearToEmpty(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());
}
inline const ::std::string& TensorProto::tensor_content() const {
// @@protoc_insertion_point(field_get:tensorflow.TensorProto.tensor_content)
return tensor_content_.Get(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
}
inline void TensorProto::set_tensor_content(const ::std::string& value) {
tensor_content_.Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value, GetArenaNoVirtual());
// @@protoc_insertion_point(field_set:tensorflow.TensorProto.tensor_content)
}
inline void TensorProto::set_tensor_content(const char* value) {
tensor_content_.Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value),
GetArenaNoVirtual());
// @@protoc_insertion_point(field_set_char:tensorflow.TensorProto.tensor_content)
}
inline void TensorProto::set_tensor_content(const void* value,
size_t size) {
tensor_content_.Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(
reinterpret_cast<const char*>(value), size), GetArenaNoVirtual());
// @@protoc_insertion_point(field_set_pointer:tensorflow.TensorProto.tensor_content)
}
inline ::std::string* TensorProto::mutable_tensor_content() {
// @@protoc_insertion_point(field_mutable:tensorflow.TensorProto.tensor_content)
return tensor_content_.Mutable(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());
}
inline ::std::string* TensorProto::release_tensor_content() {
// @@protoc_insertion_point(field_release:tensorflow.TensorProto.tensor_content)
return tensor_content_.Release(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());
}
inline ::std::string* TensorProto::unsafe_arena_release_tensor_content() {
// @@protoc_insertion_point(field_unsafe_arena_release:tensorflow.TensorProto.tensor_content)
GOOGLE_DCHECK(GetArenaNoVirtual() != NULL);
return tensor_content_.UnsafeArenaRelease(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
GetArenaNoVirtual());
}
inline void TensorProto::set_allocated_tensor_content(::std::string* tensor_content) {
if (tensor_content != NULL) {
} else {
}
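  // Note: the empty if/else above (and the similar branches in the other
  // set_allocated_* accessors in this header) is a no-op placeholder emitted
  // by this protoc version; the actual ownership handling is performed
  // entirely by the SetAllocated() call below.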
tensor_content_.SetAllocated(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), tensor_content,
GetArenaNoVirtual());
// @@protoc_insertion_point(field_set_allocated:tensorflow.TensorProto.tensor_content)
}
inline void TensorProto::unsafe_arena_set_allocated_tensor_content(
::std::string* tensor_content) {
GOOGLE_DCHECK(GetArenaNoVirtual() != NULL);
if (tensor_content != NULL) {
} else {
}
tensor_content_.UnsafeArenaSetAllocated(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
tensor_content, GetArenaNoVirtual());
// @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.TensorProto.tensor_content)
}
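// Usage sketch (illustrative, not part of the generated API): for numeric
// dtypes a tensor's payload may arrive either as raw little-endian bytes in
// tensor_content or in the typed repeated fields, so readers typically check
// both. Hypothetical helper, assuming a DT_FLOAT tensor:
//
//   inline const float* FloatData(const tensorflow::TensorProto& t) {
//     if (!t.tensor_content().empty())
//       return reinterpret_cast<const float*>(t.tensor_content().data());
//     return t.float_val().data();  // fall back to the repeated float_val
//   }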
// repeated int32 half_val = 13 [packed = true];
inline int TensorProto::half_val_size() const {
return half_val_.size();
}
inline void TensorProto::clear_half_val() {
half_val_.Clear();
}
inline ::google::protobuf::int32 TensorProto::half_val(int index) const {
// @@protoc_insertion_point(field_get:tensorflow.TensorProto.half_val)
return half_val_.Get(index);
}
inline void TensorProto::set_half_val(int index, ::google::protobuf::int32 value) {
half_val_.Set(index, value);
// @@protoc_insertion_point(field_set:tensorflow.TensorProto.half_val)
}
inline void TensorProto::add_half_val(::google::protobuf::int32 value) {
half_val_.Add(value);
// @@protoc_insertion_point(field_add:tensorflow.TensorProto.half_val)
}
inline const ::google::protobuf::RepeatedField< ::google::protobuf::int32 >&
TensorProto::half_val() const {
// @@protoc_insertion_point(field_list:tensorflow.TensorProto.half_val)
return half_val_;
}
inline ::google::protobuf::RepeatedField< ::google::protobuf::int32 >*
TensorProto::mutable_half_val() {
// @@protoc_insertion_point(field_mutable_list:tensorflow.TensorProto.half_val)
return &half_val_;
}
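// Note: half_val carries IEEE 754 half-precision values as their 16-bit
// patterns widened to int32, since protobuf has no 16-bit scalar type;
// consumers are expected to reinterpret the low 16 bits of each element.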
// repeated float float_val = 5 [packed = true];
inline int TensorProto::float_val_size() const {
return float_val_.size();
}
inline void TensorProto::clear_float_val() {
float_val_.Clear();
}
inline float TensorProto::float_val(int index) const {
// @@protoc_insertion_point(field_get:tensorflow.TensorProto.float_val)
return float_val_.Get(index);
}
inline void TensorProto::set_float_val(int index, float value) {
float_val_.Set(index, value);
// @@protoc_insertion_point(field_set:tensorflow.TensorProto.float_val)
}
inline void TensorProto::add_float_val(float value) {
float_val_.Add(value);
// @@protoc_insertion_point(field_add:tensorflow.TensorProto.float_val)
}
inline const ::google::protobuf::RepeatedField< float >&
TensorProto::float_val() const {
// @@protoc_insertion_point(field_list:tensorflow.TensorProto.float_val)
return float_val_;
}
inline ::google::protobuf::RepeatedField< float >*
TensorProto::mutable_float_val() {
// @@protoc_insertion_point(field_mutable_list:tensorflow.TensorProto.float_val)
return &float_val_;
}
// repeated double double_val = 6 [packed = true];
inline int TensorProto::double_val_size() const {
return double_val_.size();
}
inline void TensorProto::clear_double_val() {
double_val_.Clear();
}
inline double TensorProto::double_val(int index) const {
// @@protoc_insertion_point(field_get:tensorflow.TensorProto.double_val)
return double_val_.Get(index);
}
inline void TensorProto::set_double_val(int index, double value) {
double_val_.Set(index, value);
// @@protoc_insertion_point(field_set:tensorflow.TensorProto.double_val)
}
inline void TensorProto::add_double_val(double value) {
double_val_.Add(value);
// @@protoc_insertion_point(field_add:tensorflow.TensorProto.double_val)
}
inline const ::google::protobuf::RepeatedField< double >&
TensorProto::double_val() const {
// @@protoc_insertion_point(field_list:tensorflow.TensorProto.double_val)
return double_val_;
}
inline ::google::protobuf::RepeatedField< double >*
TensorProto::mutable_double_val() {
// @@protoc_insertion_point(field_mutable_list:tensorflow.TensorProto.double_val)
return &double_val_;
}
// repeated int32 int_val = 7 [packed = true];
inline int TensorProto::int_val_size() const {
return int_val_.size();
}
inline void TensorProto::clear_int_val() {
int_val_.Clear();
}
inline ::google::protobuf::int32 TensorProto::int_val(int index) const {
// @@protoc_insertion_point(field_get:tensorflow.TensorProto.int_val)
return int_val_.Get(index);
}
inline void TensorProto::set_int_val(int index, ::google::protobuf::int32 value) {
int_val_.Set(index, value);
// @@protoc_insertion_point(field_set:tensorflow.TensorProto.int_val)
}
inline void TensorProto::add_int_val(::google::protobuf::int32 value) {
int_val_.Add(value);
// @@protoc_insertion_point(field_add:tensorflow.TensorProto.int_val)
}
inline const ::google::protobuf::RepeatedField< ::google::protobuf::int32 >&
TensorProto::int_val() const {
// @@protoc_insertion_point(field_list:tensorflow.TensorProto.int_val)
return int_val_;
}
inline ::google::protobuf::RepeatedField< ::google::protobuf::int32 >*
TensorProto::mutable_int_val() {
// @@protoc_insertion_point(field_mutable_list:tensorflow.TensorProto.int_val)
return &int_val_;
}
// repeated bytes string_val = 8;
inline int TensorProto::string_val_size() const {
return string_val_.size();
}
inline void TensorProto::clear_string_val() {
string_val_.Clear();
}
inline const ::std::string& TensorProto::string_val(int index) const {
// @@protoc_insertion_point(field_get:tensorflow.TensorProto.string_val)
return string_val_.Get(index);
}
inline ::std::string* TensorProto::mutable_string_val(int index) {
// @@protoc_insertion_point(field_mutable:tensorflow.TensorProto.string_val)
return string_val_.Mutable(index);
}
inline void TensorProto::set_string_val(int index, const ::std::string& value) {
// @@protoc_insertion_point(field_set:tensorflow.TensorProto.string_val)
string_val_.Mutable(index)->assign(value);
}
inline void TensorProto::set_string_val(int index, const char* value) {
string_val_.Mutable(index)->assign(value);
// @@protoc_insertion_point(field_set_char:tensorflow.TensorProto.string_val)
}
inline void TensorProto::set_string_val(int index, const void* value, size_t size) {
string_val_.Mutable(index)->assign(
reinterpret_cast<const char*>(value), size);
// @@protoc_insertion_point(field_set_pointer:tensorflow.TensorProto.string_val)
}
inline ::std::string* TensorProto::add_string_val() {
// @@protoc_insertion_point(field_add_mutable:tensorflow.TensorProto.string_val)
return string_val_.Add();
}
inline void TensorProto::add_string_val(const ::std::string& value) {
string_val_.Add()->assign(value);
// @@protoc_insertion_point(field_add:tensorflow.TensorProto.string_val)
}
inline void TensorProto::add_string_val(const char* value) {
string_val_.Add()->assign(value);
// @@protoc_insertion_point(field_add_char:tensorflow.TensorProto.string_val)
}
inline void TensorProto::add_string_val(const void* value, size_t size) {
string_val_.Add()->assign(reinterpret_cast<const char*>(value), size);
// @@protoc_insertion_point(field_add_pointer:tensorflow.TensorProto.string_val)
}
inline const ::google::protobuf::RepeatedPtrField< ::std::string>&
TensorProto::string_val() const {
// @@protoc_insertion_point(field_list:tensorflow.TensorProto.string_val)
return string_val_;
}
inline ::google::protobuf::RepeatedPtrField< ::std::string>*
TensorProto::mutable_string_val() {
// @@protoc_insertion_point(field_mutable_list:tensorflow.TensorProto.string_val)
return &string_val_;
}
// repeated float scomplex_val = 9 [packed = true];
inline int TensorProto::scomplex_val_size() const {
return scomplex_val_.size();
}
inline void TensorProto::clear_scomplex_val() {
scomplex_val_.Clear();
}
inline float TensorProto::scomplex_val(int index) const {
// @@protoc_insertion_point(field_get:tensorflow.TensorProto.scomplex_val)
return scomplex_val_.Get(index);
}
inline void TensorProto::set_scomplex_val(int index, float value) {
scomplex_val_.Set(index, value);
// @@protoc_insertion_point(field_set:tensorflow.TensorProto.scomplex_val)
}
inline void TensorProto::add_scomplex_val(float value) {
scomplex_val_.Add(value);
// @@protoc_insertion_point(field_add:tensorflow.TensorProto.scomplex_val)
}
inline const ::google::protobuf::RepeatedField< float >&
TensorProto::scomplex_val() const {
// @@protoc_insertion_point(field_list:tensorflow.TensorProto.scomplex_val)
return scomplex_val_;
}
inline ::google::protobuf::RepeatedField< float >*
TensorProto::mutable_scomplex_val() {
// @@protoc_insertion_point(field_mutable_list:tensorflow.TensorProto.scomplex_val)
return &scomplex_val_;
}
// repeated int64 int64_val = 10 [packed = true];
inline int TensorProto::int64_val_size() const {
return int64_val_.size();
}
inline void TensorProto::clear_int64_val() {
int64_val_.Clear();
}
inline ::google::protobuf::int64 TensorProto::int64_val(int index) const {
// @@protoc_insertion_point(field_get:tensorflow.TensorProto.int64_val)
return int64_val_.Get(index);
}
inline void TensorProto::set_int64_val(int index, ::google::protobuf::int64 value) {
int64_val_.Set(index, value);
// @@protoc_insertion_point(field_set:tensorflow.TensorProto.int64_val)
}
inline void TensorProto::add_int64_val(::google::protobuf::int64 value) {
int64_val_.Add(value);
// @@protoc_insertion_point(field_add:tensorflow.TensorProto.int64_val)
}
inline const ::google::protobuf::RepeatedField< ::google::protobuf::int64 >&
TensorProto::int64_val() const {
// @@protoc_insertion_point(field_list:tensorflow.TensorProto.int64_val)
return int64_val_;
}
inline ::google::protobuf::RepeatedField< ::google::protobuf::int64 >*
TensorProto::mutable_int64_val() {
// @@protoc_insertion_point(field_mutable_list:tensorflow.TensorProto.int64_val)
return &int64_val_;
}
// repeated bool bool_val = 11 [packed = true];
inline int TensorProto::bool_val_size() const {
return bool_val_.size();
}
inline void TensorProto::clear_bool_val() {
bool_val_.Clear();
}
inline bool TensorProto::bool_val(int index) const {
// @@protoc_insertion_point(field_get:tensorflow.TensorProto.bool_val)
return bool_val_.Get(index);
}
inline void TensorProto::set_bool_val(int index, bool value) {
bool_val_.Set(index, value);
// @@protoc_insertion_point(field_set:tensorflow.TensorProto.bool_val)
}
inline void TensorProto::add_bool_val(bool value) {
bool_val_.Add(value);
// @@protoc_insertion_point(field_add:tensorflow.TensorProto.bool_val)
}
inline const ::google::protobuf::RepeatedField< bool >&
TensorProto::bool_val() const {
// @@protoc_insertion_point(field_list:tensorflow.TensorProto.bool_val)
return bool_val_;
}
inline ::google::protobuf::RepeatedField< bool >*
TensorProto::mutable_bool_val() {
// @@protoc_insertion_point(field_mutable_list:tensorflow.TensorProto.bool_val)
return &bool_val_;
}
// repeated double dcomplex_val = 12 [packed = true];
inline int TensorProto::dcomplex_val_size() const {
return dcomplex_val_.size();
}
inline void TensorProto::clear_dcomplex_val() {
dcomplex_val_.Clear();
}
inline double TensorProto::dcomplex_val(int index) const {
// @@protoc_insertion_point(field_get:tensorflow.TensorProto.dcomplex_val)
return dcomplex_val_.Get(index);
}
inline void TensorProto::set_dcomplex_val(int index, double value) {
dcomplex_val_.Set(index, value);
// @@protoc_insertion_point(field_set:tensorflow.TensorProto.dcomplex_val)
}
inline void TensorProto::add_dcomplex_val(double value) {
dcomplex_val_.Add(value);
// @@protoc_insertion_point(field_add:tensorflow.TensorProto.dcomplex_val)
}
inline const ::google::protobuf::RepeatedField< double >&
TensorProto::dcomplex_val() const {
// @@protoc_insertion_point(field_list:tensorflow.TensorProto.dcomplex_val)
return dcomplex_val_;
}
inline ::google::protobuf::RepeatedField< double >*
TensorProto::mutable_dcomplex_val() {
// @@protoc_insertion_point(field_mutable_list:tensorflow.TensorProto.dcomplex_val)
return &dcomplex_val_;
}
inline const TensorProto* TensorProto::internal_default_instance() {
return &TensorProto_default_instance_.get();
}
#endif // !PROTOBUF_INLINE_NOT_IN_HEADERS
// @@protoc_insertion_point(namespace_scope)
} // namespace tensorflow
// @@protoc_insertion_point(global_scope)
#endif // PROTOBUF_tensor_2eproto__INCLUDED
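The header above is consumed through the ordinary protobuf Message API. A
minimal round-trip sketch (illustrative only; assumes #include "tensor.pb.h"
and <string>, and names are arbitrary):

    tensorflow::TensorProto t;
    t.set_dtype(tensorflow::DT_FLOAT);
    t.mutable_tensor_shape()->add_dim()->set_size(4);
    for (int i = 0; i < 4; ++i) t.add_float_val(0.25f * i);
    std::string wire;
    t.SerializeToString(&wire);    // inherited from ::google::protobuf::Message
    tensorflow::TensorProto parsed;
    parsed.ParseFromString(wire);  // parsed.float_val_size() == 4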

View File

@ -0,0 +1,895 @@
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: tensor_shape.proto
#define INTERNAL_SUPPRESS_PROTOBUF_FIELD_DEPRECATION
#include "tensor_shape.pb.h"
#include <algorithm>
#include <google/protobuf/stubs/common.h>
#include <google/protobuf/stubs/port.h>
#include <google/protobuf/stubs/once.h>
#include <google/protobuf/io/coded_stream.h>
#include <google/protobuf/wire_format_lite_inl.h>
#include <google/protobuf/descriptor.h>
#include <google/protobuf/generated_message_reflection.h>
#include <google/protobuf/reflection_ops.h>
#include <google/protobuf/wire_format.h>
// @@protoc_insertion_point(includes)
namespace tensorflow {
namespace {
const ::google::protobuf::Descriptor* TensorShapeProto_descriptor_ = NULL;
const ::google::protobuf::internal::GeneratedMessageReflection*
TensorShapeProto_reflection_ = NULL;
const ::google::protobuf::Descriptor* TensorShapeProto_Dim_descriptor_ = NULL;
const ::google::protobuf::internal::GeneratedMessageReflection*
TensorShapeProto_Dim_reflection_ = NULL;
} // namespace
void protobuf_AssignDesc_tensor_5fshape_2eproto() GOOGLE_ATTRIBUTE_COLD;
void protobuf_AssignDesc_tensor_5fshape_2eproto() {
protobuf_AddDesc_tensor_5fshape_2eproto();
const ::google::protobuf::FileDescriptor* file =
::google::protobuf::DescriptorPool::generated_pool()->FindFileByName(
"tensor_shape.proto");
GOOGLE_CHECK(file != NULL);
TensorShapeProto_descriptor_ = file->message_type(0);
static const int TensorShapeProto_offsets_[2] = {
GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(TensorShapeProto, dim_),
GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(TensorShapeProto, unknown_rank_),
};
TensorShapeProto_reflection_ =
::google::protobuf::internal::GeneratedMessageReflection::NewGeneratedMessageReflection(
TensorShapeProto_descriptor_,
TensorShapeProto::internal_default_instance(),
TensorShapeProto_offsets_,
-1,
-1,
-1,
sizeof(TensorShapeProto),
GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(TensorShapeProto, _internal_metadata_));
TensorShapeProto_Dim_descriptor_ = TensorShapeProto_descriptor_->nested_type(0);
static const int TensorShapeProto_Dim_offsets_[2] = {
GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(TensorShapeProto_Dim, size_),
GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(TensorShapeProto_Dim, name_),
};
TensorShapeProto_Dim_reflection_ =
::google::protobuf::internal::GeneratedMessageReflection::NewGeneratedMessageReflection(
TensorShapeProto_Dim_descriptor_,
TensorShapeProto_Dim::internal_default_instance(),
TensorShapeProto_Dim_offsets_,
-1,
-1,
-1,
sizeof(TensorShapeProto_Dim),
GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(TensorShapeProto_Dim, _internal_metadata_));
}
namespace {
GOOGLE_PROTOBUF_DECLARE_ONCE(protobuf_AssignDescriptors_once_);
void protobuf_AssignDescriptorsOnce() {
::google::protobuf::GoogleOnceInit(&protobuf_AssignDescriptors_once_,
&protobuf_AssignDesc_tensor_5fshape_2eproto);
}
void protobuf_RegisterTypes(const ::std::string&) GOOGLE_ATTRIBUTE_COLD;
void protobuf_RegisterTypes(const ::std::string&) {
protobuf_AssignDescriptorsOnce();
::google::protobuf::MessageFactory::InternalRegisterGeneratedMessage(
TensorShapeProto_descriptor_, TensorShapeProto::internal_default_instance());
::google::protobuf::MessageFactory::InternalRegisterGeneratedMessage(
TensorShapeProto_Dim_descriptor_, TensorShapeProto_Dim::internal_default_instance());
}
} // namespace
void protobuf_ShutdownFile_tensor_5fshape_2eproto() {
TensorShapeProto_default_instance_.Shutdown();
delete TensorShapeProto_reflection_;
TensorShapeProto_Dim_default_instance_.Shutdown();
delete TensorShapeProto_Dim_reflection_;
}
void protobuf_InitDefaults_tensor_5fshape_2eproto_impl() {
GOOGLE_PROTOBUF_VERIFY_VERSION;
TensorShapeProto_default_instance_.DefaultConstruct();
::google::protobuf::internal::GetEmptyString();
TensorShapeProto_Dim_default_instance_.DefaultConstruct();
TensorShapeProto_default_instance_.get_mutable()->InitAsDefaultInstance();
TensorShapeProto_Dim_default_instance_.get_mutable()->InitAsDefaultInstance();
}
GOOGLE_PROTOBUF_DECLARE_ONCE(protobuf_InitDefaults_tensor_5fshape_2eproto_once_);
void protobuf_InitDefaults_tensor_5fshape_2eproto() {
::google::protobuf::GoogleOnceInit(&protobuf_InitDefaults_tensor_5fshape_2eproto_once_,
&protobuf_InitDefaults_tensor_5fshape_2eproto_impl);
}
void protobuf_AddDesc_tensor_5fshape_2eproto_impl() {
GOOGLE_PROTOBUF_VERIFY_VERSION;
protobuf_InitDefaults_tensor_5fshape_2eproto();
::google::protobuf::DescriptorPool::InternalAddGeneratedFile(
"\n\022tensor_shape.proto\022\ntensorflow\"z\n\020Tens"
"orShapeProto\022-\n\003dim\030\002 \003(\0132 .tensorflow.T"
"ensorShapeProto.Dim\022\024\n\014unknown_rank\030\003 \001("
"\010\032!\n\003Dim\022\014\n\004size\030\001 \001(\003\022\014\n\004name\030\002 \001(\tB2\n\030"
"org.tensorflow.frameworkB\021TensorShapePro"
"tosP\001\370\001\001b\006proto3", 216);
::google::protobuf::MessageFactory::InternalRegisterGeneratedFile(
"tensor_shape.proto", &protobuf_RegisterTypes);
::google::protobuf::internal::OnShutdown(&protobuf_ShutdownFile_tensor_5fshape_2eproto);
}
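// The 216-byte string literal above is the serialized FileDescriptorProto
// for tensor_shape.proto; InternalAddGeneratedFile() registers it with the
// generated DescriptorPool exactly once (guarded by the GoogleOnceInit call
// below), which is what enables reflection over these types.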
GOOGLE_PROTOBUF_DECLARE_ONCE(protobuf_AddDesc_tensor_5fshape_2eproto_once_);
void protobuf_AddDesc_tensor_5fshape_2eproto() {
::google::protobuf::GoogleOnceInit(&protobuf_AddDesc_tensor_5fshape_2eproto_once_,
&protobuf_AddDesc_tensor_5fshape_2eproto_impl);
}
// Force AddDescriptors() to be called at static initialization time.
struct StaticDescriptorInitializer_tensor_5fshape_2eproto {
StaticDescriptorInitializer_tensor_5fshape_2eproto() {
protobuf_AddDesc_tensor_5fshape_2eproto();
}
} static_descriptor_initializer_tensor_5fshape_2eproto_;
namespace {
static void MergeFromFail(int line) GOOGLE_ATTRIBUTE_COLD GOOGLE_ATTRIBUTE_NORETURN;
static void MergeFromFail(int line) {
::google::protobuf::internal::MergeFromFail(__FILE__, line);
}
} // namespace
// ===================================================================
#if !defined(_MSC_VER) || _MSC_VER >= 1900
const int TensorShapeProto_Dim::kSizeFieldNumber;
const int TensorShapeProto_Dim::kNameFieldNumber;
#endif // !defined(_MSC_VER) || _MSC_VER >= 1900
TensorShapeProto_Dim::TensorShapeProto_Dim()
: ::google::protobuf::Message(), _internal_metadata_(NULL) {
if (this != internal_default_instance()) protobuf_InitDefaults_tensor_5fshape_2eproto();
SharedCtor();
// @@protoc_insertion_point(constructor:tensorflow.TensorShapeProto.Dim)
}
TensorShapeProto_Dim::TensorShapeProto_Dim(::google::protobuf::Arena* arena)
: ::google::protobuf::Message(),
_internal_metadata_(arena) {
#ifdef GOOGLE_PROTOBUF_NO_STATIC_INITIALIZER
protobuf_InitDefaults_tensor_5fshape_2eproto();
#endif // GOOGLE_PROTOBUF_NO_STATIC_INITIALIZER
SharedCtor();
RegisterArenaDtor(arena);
// @@protoc_insertion_point(arena_constructor:tensorflow.TensorShapeProto.Dim)
}
void TensorShapeProto_Dim::InitAsDefaultInstance() {
}
TensorShapeProto_Dim::TensorShapeProto_Dim(const TensorShapeProto_Dim& from)
: ::google::protobuf::Message(),
_internal_metadata_(NULL) {
SharedCtor();
UnsafeMergeFrom(from);
// @@protoc_insertion_point(copy_constructor:tensorflow.TensorShapeProto.Dim)
}
void TensorShapeProto_Dim::SharedCtor() {
name_.UnsafeSetDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
size_ = GOOGLE_LONGLONG(0);
_cached_size_ = 0;
}
TensorShapeProto_Dim::~TensorShapeProto_Dim() {
// @@protoc_insertion_point(destructor:tensorflow.TensorShapeProto.Dim)
SharedDtor();
}
void TensorShapeProto_Dim::SharedDtor() {
::google::protobuf::Arena* arena = GetArenaNoVirtual();
if (arena != NULL) {
return;
}
name_.Destroy(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), arena);
}
void TensorShapeProto_Dim::ArenaDtor(void* object) {
TensorShapeProto_Dim* _this = reinterpret_cast< TensorShapeProto_Dim* >(object);
(void)_this;
}
void TensorShapeProto_Dim::RegisterArenaDtor(::google::protobuf::Arena* arena) {
}
void TensorShapeProto_Dim::SetCachedSize(int size) const {
GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
_cached_size_ = size;
GOOGLE_SAFE_CONCURRENT_WRITES_END();
}
const ::google::protobuf::Descriptor* TensorShapeProto_Dim::descriptor() {
protobuf_AssignDescriptorsOnce();
return TensorShapeProto_Dim_descriptor_;
}
const TensorShapeProto_Dim& TensorShapeProto_Dim::default_instance() {
protobuf_InitDefaults_tensor_5fshape_2eproto();
return *internal_default_instance();
}
::google::protobuf::internal::ExplicitlyConstructed<TensorShapeProto_Dim> TensorShapeProto_Dim_default_instance_;
TensorShapeProto_Dim* TensorShapeProto_Dim::New(::google::protobuf::Arena* arena) const {
return ::google::protobuf::Arena::CreateMessage<TensorShapeProto_Dim>(arena);
}
void TensorShapeProto_Dim::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.TensorShapeProto.Dim)
size_ = GOOGLE_LONGLONG(0);
name_.ClearToEmpty(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());
}
bool TensorShapeProto_Dim::MergePartialFromCodedStream(
::google::protobuf::io::CodedInputStream* input) {
#define DO_(EXPRESSION) if (!GOOGLE_PREDICT_TRUE(EXPRESSION)) goto failure
::google::protobuf::uint32 tag;
// @@protoc_insertion_point(parse_start:tensorflow.TensorShapeProto.Dim)
for (;;) {
::std::pair< ::google::protobuf::uint32, bool> p = input->ReadTagWithCutoff(127);
tag = p.first;
if (!p.second) goto handle_unusual;
switch (::google::protobuf::internal::WireFormatLite::GetTagFieldNumber(tag)) {
// optional int64 size = 1;
case 1: {
if (tag == 8) {
DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
::google::protobuf::int64, ::google::protobuf::internal::WireFormatLite::TYPE_INT64>(
input, &size_)));
} else {
goto handle_unusual;
}
if (input->ExpectTag(18)) goto parse_name;
break;
}
// optional string name = 2;
case 2: {
if (tag == 18) {
parse_name:
DO_(::google::protobuf::internal::WireFormatLite::ReadString(
input, this->mutable_name()));
DO_(::google::protobuf::internal::WireFormatLite::VerifyUtf8String(
this->name().data(), this->name().length(),
::google::protobuf::internal::WireFormatLite::PARSE,
"tensorflow.TensorShapeProto.Dim.name"));
} else {
goto handle_unusual;
}
if (input->ExpectAtEnd()) goto success;
break;
}
default: {
handle_unusual:
if (tag == 0 ||
::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
::google::protobuf::internal::WireFormatLite::WIRETYPE_END_GROUP) {
goto success;
}
DO_(::google::protobuf::internal::WireFormatLite::SkipField(input, tag));
break;
}
}
}
success:
// @@protoc_insertion_point(parse_success:tensorflow.TensorShapeProto.Dim)
return true;
failure:
// @@protoc_insertion_point(parse_failure:tensorflow.TensorShapeProto.Dim)
return false;
#undef DO_
}
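// Wire-format note: the literal tags tested above follow the protobuf rule
// tag = (field_number << 3) | wire_type, so 8 = field 1 (size) as a varint
// and 18 = field 2 (name) as a length-delimited string.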
void TensorShapeProto_Dim::SerializeWithCachedSizes(
::google::protobuf::io::CodedOutputStream* output) const {
// @@protoc_insertion_point(serialize_start:tensorflow.TensorShapeProto.Dim)
// optional int64 size = 1;
if (this->size() != 0) {
::google::protobuf::internal::WireFormatLite::WriteInt64(1, this->size(), output);
}
// optional string name = 2;
if (this->name().size() > 0) {
::google::protobuf::internal::WireFormatLite::VerifyUtf8String(
this->name().data(), this->name().length(),
::google::protobuf::internal::WireFormatLite::SERIALIZE,
"tensorflow.TensorShapeProto.Dim.name");
::google::protobuf::internal::WireFormatLite::WriteStringMaybeAliased(
2, this->name(), output);
}
// @@protoc_insertion_point(serialize_end:tensorflow.TensorShapeProto.Dim)
}
::google::protobuf::uint8* TensorShapeProto_Dim::InternalSerializeWithCachedSizesToArray(
bool deterministic, ::google::protobuf::uint8* target) const {
(void)deterministic; // Unused
// @@protoc_insertion_point(serialize_to_array_start:tensorflow.TensorShapeProto.Dim)
// optional int64 size = 1;
if (this->size() != 0) {
target = ::google::protobuf::internal::WireFormatLite::WriteInt64ToArray(1, this->size(), target);
}
// optional string name = 2;
if (this->name().size() > 0) {
::google::protobuf::internal::WireFormatLite::VerifyUtf8String(
this->name().data(), this->name().length(),
::google::protobuf::internal::WireFormatLite::SERIALIZE,
"tensorflow.TensorShapeProto.Dim.name");
target =
::google::protobuf::internal::WireFormatLite::WriteStringToArray(
2, this->name(), target);
}
// @@protoc_insertion_point(serialize_to_array_end:tensorflow.TensorShapeProto.Dim)
return target;
}
size_t TensorShapeProto_Dim::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.TensorShapeProto.Dim)
size_t total_size = 0;
// optional int64 size = 1;
if (this->size() != 0) {
total_size += 1 +
::google::protobuf::internal::WireFormatLite::Int64Size(
this->size());
}
// optional string name = 2;
if (this->name().size() > 0) {
total_size += 1 +
::google::protobuf::internal::WireFormatLite::StringSize(
this->name());
}
int cached_size = ::google::protobuf::internal::ToCachedSize(total_size);
GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
_cached_size_ = cached_size;
GOOGLE_SAFE_CONCURRENT_WRITES_END();
return total_size;
}
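// ByteSizeLong() caches its result in _cached_size_ so that a subsequent
// serialization pass can emit this submessage's length prefix without
// re-measuring; GetCachedSize() returns the value stored under the
// concurrent-write guard.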
void TensorShapeProto_Dim::MergeFrom(const ::google::protobuf::Message& from) {
// @@protoc_insertion_point(generalized_merge_from_start:tensorflow.TensorShapeProto.Dim)
if (GOOGLE_PREDICT_FALSE(&from == this)) MergeFromFail(__LINE__);
const TensorShapeProto_Dim* source =
::google::protobuf::internal::DynamicCastToGenerated<const TensorShapeProto_Dim>(
&from);
if (source == NULL) {
// @@protoc_insertion_point(generalized_merge_from_cast_fail:tensorflow.TensorShapeProto.Dim)
::google::protobuf::internal::ReflectionOps::Merge(from, this);
} else {
// @@protoc_insertion_point(generalized_merge_from_cast_success:tensorflow.TensorShapeProto.Dim)
UnsafeMergeFrom(*source);
}
}
void TensorShapeProto_Dim::MergeFrom(const TensorShapeProto_Dim& from) {
// @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.TensorShapeProto.Dim)
if (GOOGLE_PREDICT_TRUE(&from != this)) {
UnsafeMergeFrom(from);
} else {
MergeFromFail(__LINE__);
}
}
void TensorShapeProto_Dim::UnsafeMergeFrom(const TensorShapeProto_Dim& from) {
GOOGLE_DCHECK(&from != this);
if (from.size() != 0) {
set_size(from.size());
}
if (from.name().size() > 0) {
set_name(from.name());
}
}
void TensorShapeProto_Dim::CopyFrom(const ::google::protobuf::Message& from) {
// @@protoc_insertion_point(generalized_copy_from_start:tensorflow.TensorShapeProto.Dim)
if (&from == this) return;
Clear();
MergeFrom(from);
}
void TensorShapeProto_Dim::CopyFrom(const TensorShapeProto_Dim& from) {
// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.TensorShapeProto.Dim)
if (&from == this) return;
Clear();
UnsafeMergeFrom(from);
}
bool TensorShapeProto_Dim::IsInitialized() const {
return true;
}
void TensorShapeProto_Dim::Swap(TensorShapeProto_Dim* other) {
if (other == this) return;
if (GetArenaNoVirtual() == other->GetArenaNoVirtual()) {
InternalSwap(other);
} else {
TensorShapeProto_Dim temp;
temp.UnsafeMergeFrom(*this);
CopyFrom(*other);
other->CopyFrom(temp);
}
}
void TensorShapeProto_Dim::UnsafeArenaSwap(TensorShapeProto_Dim* other) {
if (other == this) return;
GOOGLE_DCHECK(GetArenaNoVirtual() == other->GetArenaNoVirtual());
InternalSwap(other);
}
void TensorShapeProto_Dim::InternalSwap(TensorShapeProto_Dim* other) {
std::swap(size_, other->size_);
name_.Swap(&other->name_);
_internal_metadata_.Swap(&other->_internal_metadata_);
std::swap(_cached_size_, other->_cached_size_);
}
::google::protobuf::Metadata TensorShapeProto_Dim::GetMetadata() const {
protobuf_AssignDescriptorsOnce();
::google::protobuf::Metadata metadata;
metadata.descriptor = TensorShapeProto_Dim_descriptor_;
metadata.reflection = TensorShapeProto_Dim_reflection_;
return metadata;
}
// -------------------------------------------------------------------
#if !defined(_MSC_VER) || _MSC_VER >= 1900
const int TensorShapeProto::kDimFieldNumber;
const int TensorShapeProto::kUnknownRankFieldNumber;
#endif // !defined(_MSC_VER) || _MSC_VER >= 1900
TensorShapeProto::TensorShapeProto()
: ::google::protobuf::Message(), _internal_metadata_(NULL) {
if (this != internal_default_instance()) protobuf_InitDefaults_tensor_5fshape_2eproto();
SharedCtor();
// @@protoc_insertion_point(constructor:tensorflow.TensorShapeProto)
}
TensorShapeProto::TensorShapeProto(::google::protobuf::Arena* arena)
: ::google::protobuf::Message(),
_internal_metadata_(arena),
dim_(arena) {
#ifdef GOOGLE_PROTOBUF_NO_STATIC_INITIALIZER
protobuf_InitDefaults_tensor_5fshape_2eproto();
#endif // GOOGLE_PROTOBUF_NO_STATIC_INITIALIZER
SharedCtor();
RegisterArenaDtor(arena);
// @@protoc_insertion_point(arena_constructor:tensorflow.TensorShapeProto)
}
void TensorShapeProto::InitAsDefaultInstance() {
}
TensorShapeProto::TensorShapeProto(const TensorShapeProto& from)
: ::google::protobuf::Message(),
_internal_metadata_(NULL) {
SharedCtor();
UnsafeMergeFrom(from);
// @@protoc_insertion_point(copy_constructor:tensorflow.TensorShapeProto)
}
void TensorShapeProto::SharedCtor() {
unknown_rank_ = false;
_cached_size_ = 0;
}
TensorShapeProto::~TensorShapeProto() {
// @@protoc_insertion_point(destructor:tensorflow.TensorShapeProto)
SharedDtor();
}
void TensorShapeProto::SharedDtor() {
::google::protobuf::Arena* arena = GetArenaNoVirtual();
if (arena != NULL) {
return;
}
}
void TensorShapeProto::ArenaDtor(void* object) {
TensorShapeProto* _this = reinterpret_cast< TensorShapeProto* >(object);
(void)_this;
}
void TensorShapeProto::RegisterArenaDtor(::google::protobuf::Arena* arena) {
}
void TensorShapeProto::SetCachedSize(int size) const {
GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
_cached_size_ = size;
GOOGLE_SAFE_CONCURRENT_WRITES_END();
}
const ::google::protobuf::Descriptor* TensorShapeProto::descriptor() {
protobuf_AssignDescriptorsOnce();
return TensorShapeProto_descriptor_;
}
const TensorShapeProto& TensorShapeProto::default_instance() {
protobuf_InitDefaults_tensor_5fshape_2eproto();
return *internal_default_instance();
}
::google::protobuf::internal::ExplicitlyConstructed<TensorShapeProto> TensorShapeProto_default_instance_;
TensorShapeProto* TensorShapeProto::New(::google::protobuf::Arena* arena) const {
return ::google::protobuf::Arena::CreateMessage<TensorShapeProto>(arena);
}
void TensorShapeProto::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.TensorShapeProto)
unknown_rank_ = false;
dim_.Clear();
}
bool TensorShapeProto::MergePartialFromCodedStream(
::google::protobuf::io::CodedInputStream* input) {
#define DO_(EXPRESSION) if (!GOOGLE_PREDICT_TRUE(EXPRESSION)) goto failure
::google::protobuf::uint32 tag;
// @@protoc_insertion_point(parse_start:tensorflow.TensorShapeProto)
for (;;) {
::std::pair< ::google::protobuf::uint32, bool> p = input->ReadTagWithCutoff(127);
tag = p.first;
if (!p.second) goto handle_unusual;
switch (::google::protobuf::internal::WireFormatLite::GetTagFieldNumber(tag)) {
// repeated .tensorflow.TensorShapeProto.Dim dim = 2;
case 2: {
if (tag == 18) {
DO_(input->IncrementRecursionDepth());
parse_loop_dim:
DO_(::google::protobuf::internal::WireFormatLite::ReadMessageNoVirtualNoRecursionDepth(
input, add_dim()));
} else {
goto handle_unusual;
}
if (input->ExpectTag(18)) goto parse_loop_dim;
input->UnsafeDecrementRecursionDepth();
if (input->ExpectTag(24)) goto parse_unknown_rank;
break;
}
// optional bool unknown_rank = 3;
case 3: {
if (tag == 24) {
parse_unknown_rank:
DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
bool, ::google::protobuf::internal::WireFormatLite::TYPE_BOOL>(
input, &unknown_rank_)));
} else {
goto handle_unusual;
}
if (input->ExpectAtEnd()) goto success;
break;
}
default: {
handle_unusual:
if (tag == 0 ||
::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
::google::protobuf::internal::WireFormatLite::WIRETYPE_END_GROUP) {
goto success;
}
DO_(::google::protobuf::internal::WireFormatLite::SkipField(input, tag));
break;
}
}
}
success:
// @@protoc_insertion_point(parse_success:tensorflow.TensorShapeProto)
return true;
failure:
// @@protoc_insertion_point(parse_failure:tensorflow.TensorShapeProto)
return false;
#undef DO_
}
void TensorShapeProto::SerializeWithCachedSizes(
::google::protobuf::io::CodedOutputStream* output) const {
// @@protoc_insertion_point(serialize_start:tensorflow.TensorShapeProto)
// repeated .tensorflow.TensorShapeProto.Dim dim = 2;
for (unsigned int i = 0, n = this->dim_size(); i < n; i++) {
::google::protobuf::internal::WireFormatLite::WriteMessageMaybeToArray(
2, this->dim(i), output);
}
// optional bool unknown_rank = 3;
if (this->unknown_rank() != 0) {
::google::protobuf::internal::WireFormatLite::WriteBool(3, this->unknown_rank(), output);
}
// @@protoc_insertion_point(serialize_end:tensorflow.TensorShapeProto)
}
::google::protobuf::uint8* TensorShapeProto::InternalSerializeWithCachedSizesToArray(
bool deterministic, ::google::protobuf::uint8* target) const {
(void)deterministic; // Unused
// @@protoc_insertion_point(serialize_to_array_start:tensorflow.TensorShapeProto)
// repeated .tensorflow.TensorShapeProto.Dim dim = 2;
for (unsigned int i = 0, n = this->dim_size(); i < n; i++) {
target = ::google::protobuf::internal::WireFormatLite::
InternalWriteMessageNoVirtualToArray(
2, this->dim(i), false, target);
}
// optional bool unknown_rank = 3;
if (this->unknown_rank() != 0) {
target = ::google::protobuf::internal::WireFormatLite::WriteBoolToArray(3, this->unknown_rank(), target);
}
// @@protoc_insertion_point(serialize_to_array_end:tensorflow.TensorShapeProto)
return target;
}
size_t TensorShapeProto::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.TensorShapeProto)
size_t total_size = 0;
// optional bool unknown_rank = 3;
if (this->unknown_rank() != 0) {
total_size += 1 + 1;
}
// repeated .tensorflow.TensorShapeProto.Dim dim = 2;
{
unsigned int count = this->dim_size();
total_size += 1UL * count;
for (unsigned int i = 0; i < count; i++) {
total_size +=
::google::protobuf::internal::WireFormatLite::MessageSizeNoVirtual(
this->dim(i));
}
}
int cached_size = ::google::protobuf::internal::ToCachedSize(total_size);
GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
_cached_size_ = cached_size;
GOOGLE_SAFE_CONCURRENT_WRITES_END();
return total_size;
}
void TensorShapeProto::MergeFrom(const ::google::protobuf::Message& from) {
// @@protoc_insertion_point(generalized_merge_from_start:tensorflow.TensorShapeProto)
if (GOOGLE_PREDICT_FALSE(&from == this)) MergeFromFail(__LINE__);
const TensorShapeProto* source =
::google::protobuf::internal::DynamicCastToGenerated<const TensorShapeProto>(
&from);
if (source == NULL) {
// @@protoc_insertion_point(generalized_merge_from_cast_fail:tensorflow.TensorShapeProto)
::google::protobuf::internal::ReflectionOps::Merge(from, this);
} else {
// @@protoc_insertion_point(generalized_merge_from_cast_success:tensorflow.TensorShapeProto)
UnsafeMergeFrom(*source);
}
}
void TensorShapeProto::MergeFrom(const TensorShapeProto& from) {
// @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.TensorShapeProto)
if (GOOGLE_PREDICT_TRUE(&from != this)) {
UnsafeMergeFrom(from);
} else {
MergeFromFail(__LINE__);
}
}
void TensorShapeProto::UnsafeMergeFrom(const TensorShapeProto& from) {
GOOGLE_DCHECK(&from != this);
dim_.MergeFrom(from.dim_);
if (from.unknown_rank() != 0) {
set_unknown_rank(from.unknown_rank());
}
}
void TensorShapeProto::CopyFrom(const ::google::protobuf::Message& from) {
// @@protoc_insertion_point(generalized_copy_from_start:tensorflow.TensorShapeProto)
if (&from == this) return;
Clear();
MergeFrom(from);
}
void TensorShapeProto::CopyFrom(const TensorShapeProto& from) {
// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.TensorShapeProto)
if (&from == this) return;
Clear();
UnsafeMergeFrom(from);
}
bool TensorShapeProto::IsInitialized() const {
return true;
}
void TensorShapeProto::Swap(TensorShapeProto* other) {
if (other == this) return;
if (GetArenaNoVirtual() == other->GetArenaNoVirtual()) {
InternalSwap(other);
} else {
TensorShapeProto temp;
temp.UnsafeMergeFrom(*this);
CopyFrom(*other);
other->CopyFrom(temp);
}
}
void TensorShapeProto::UnsafeArenaSwap(TensorShapeProto* other) {
if (other == this) return;
GOOGLE_DCHECK(GetArenaNoVirtual() == other->GetArenaNoVirtual());
InternalSwap(other);
}
void TensorShapeProto::InternalSwap(TensorShapeProto* other) {
dim_.UnsafeArenaSwap(&other->dim_);
std::swap(unknown_rank_, other->unknown_rank_);
_internal_metadata_.Swap(&other->_internal_metadata_);
std::swap(_cached_size_, other->_cached_size_);
}
::google::protobuf::Metadata TensorShapeProto::GetMetadata() const {
protobuf_AssignDescriptorsOnce();
::google::protobuf::Metadata metadata;
metadata.descriptor = TensorShapeProto_descriptor_;
metadata.reflection = TensorShapeProto_reflection_;
return metadata;
}
#if PROTOBUF_INLINE_NOT_IN_HEADERS
// TensorShapeProto_Dim
// optional int64 size = 1;
void TensorShapeProto_Dim::clear_size() {
size_ = GOOGLE_LONGLONG(0);
}
::google::protobuf::int64 TensorShapeProto_Dim::size() const {
// @@protoc_insertion_point(field_get:tensorflow.TensorShapeProto.Dim.size)
return size_;
}
void TensorShapeProto_Dim::set_size(::google::protobuf::int64 value) {
size_ = value;
// @@protoc_insertion_point(field_set:tensorflow.TensorShapeProto.Dim.size)
}
// optional string name = 2;
void TensorShapeProto_Dim::clear_name() {
name_.ClearToEmpty(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());
}
const ::std::string& TensorShapeProto_Dim::name() const {
// @@protoc_insertion_point(field_get:tensorflow.TensorShapeProto.Dim.name)
return name_.Get(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
}
void TensorShapeProto_Dim::set_name(const ::std::string& value) {
name_.Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value, GetArenaNoVirtual());
// @@protoc_insertion_point(field_set:tensorflow.TensorShapeProto.Dim.name)
}
void TensorShapeProto_Dim::set_name(const char* value) {
name_.Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value),
GetArenaNoVirtual());
// @@protoc_insertion_point(field_set_char:tensorflow.TensorShapeProto.Dim.name)
}
void TensorShapeProto_Dim::set_name(const char* value,
size_t size) {
name_.Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(
reinterpret_cast<const char*>(value), size), GetArenaNoVirtual());
// @@protoc_insertion_point(field_set_pointer:tensorflow.TensorShapeProto.Dim.name)
}
::std::string* TensorShapeProto_Dim::mutable_name() {
// @@protoc_insertion_point(field_mutable:tensorflow.TensorShapeProto.Dim.name)
return name_.Mutable(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());
}
::std::string* TensorShapeProto_Dim::release_name() {
// @@protoc_insertion_point(field_release:tensorflow.TensorShapeProto.Dim.name)
return name_.Release(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());
}
::std::string* TensorShapeProto_Dim::unsafe_arena_release_name() {
// @@protoc_insertion_point(field_unsafe_arena_release:tensorflow.TensorShapeProto.Dim.name)
GOOGLE_DCHECK(GetArenaNoVirtual() != NULL);
return name_.UnsafeArenaRelease(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
GetArenaNoVirtual());
}
void TensorShapeProto_Dim::set_allocated_name(::std::string* name) {
if (name != NULL) {
} else {
}
name_.SetAllocated(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), name,
GetArenaNoVirtual());
// @@protoc_insertion_point(field_set_allocated:tensorflow.TensorShapeProto.Dim.name)
}
void TensorShapeProto_Dim::unsafe_arena_set_allocated_name(
::std::string* name) {
GOOGLE_DCHECK(GetArenaNoVirtual() != NULL);
if (name != NULL) {
} else {
}
name_.UnsafeArenaSetAllocated(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
name, GetArenaNoVirtual());
// @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.TensorShapeProto.Dim.name)
}
inline const TensorShapeProto_Dim* TensorShapeProto_Dim::internal_default_instance() {
return &TensorShapeProto_Dim_default_instance_.get();
}
// -------------------------------------------------------------------
// TensorShapeProto
// repeated .tensorflow.TensorShapeProto.Dim dim = 2;
int TensorShapeProto::dim_size() const {
return dim_.size();
}
void TensorShapeProto::clear_dim() {
dim_.Clear();
}
const ::tensorflow::TensorShapeProto_Dim& TensorShapeProto::dim(int index) const {
// @@protoc_insertion_point(field_get:tensorflow.TensorShapeProto.dim)
return dim_.Get(index);
}
::tensorflow::TensorShapeProto_Dim* TensorShapeProto::mutable_dim(int index) {
// @@protoc_insertion_point(field_mutable:tensorflow.TensorShapeProto.dim)
return dim_.Mutable(index);
}
::tensorflow::TensorShapeProto_Dim* TensorShapeProto::add_dim() {
// @@protoc_insertion_point(field_add:tensorflow.TensorShapeProto.dim)
return dim_.Add();
}
::google::protobuf::RepeatedPtrField< ::tensorflow::TensorShapeProto_Dim >*
TensorShapeProto::mutable_dim() {
// @@protoc_insertion_point(field_mutable_list:tensorflow.TensorShapeProto.dim)
return &dim_;
}
const ::google::protobuf::RepeatedPtrField< ::tensorflow::TensorShapeProto_Dim >&
TensorShapeProto::dim() const {
// @@protoc_insertion_point(field_list:tensorflow.TensorShapeProto.dim)
return dim_;
}
// optional bool unknown_rank = 3;
void TensorShapeProto::clear_unknown_rank() {
unknown_rank_ = false;
}
bool TensorShapeProto::unknown_rank() const {
// @@protoc_insertion_point(field_get:tensorflow.TensorShapeProto.unknown_rank)
return unknown_rank_;
}
void TensorShapeProto::set_unknown_rank(bool value) {
unknown_rank_ = value;
// @@protoc_insertion_point(field_set:tensorflow.TensorShapeProto.unknown_rank)
}
inline const TensorShapeProto* TensorShapeProto::internal_default_instance() {
return &TensorShapeProto_default_instance_.get();
}
#endif // PROTOBUF_INLINE_NOT_IN_HEADERS
// @@protoc_insertion_point(namespace_scope)
} // namespace tensorflow
// @@protoc_insertion_point(global_scope)
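Taken together with the header that follows, the implementation above is
exercised like any other proto3 message. A small construction sketch
(illustrative only):

    tensorflow::TensorShapeProto shape;
    tensorflow::TensorShapeProto_Dim* h = shape.add_dim();
    h->set_size(224);
    h->set_name("height");
    shape.add_dim()->set_size(224);      // unnamed second dimension
    shape.set_unknown_rank(false);       // proto3 default; shown for clarity
    size_t bytes = shape.ByteSizeLong(); // also primes the cached size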

View File

@ -0,0 +1,423 @@
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: tensor_shape.proto
#ifndef PROTOBUF_tensor_5fshape_2eproto__INCLUDED
#define PROTOBUF_tensor_5fshape_2eproto__INCLUDED
#include <string>
#include <google/protobuf/stubs/common.h>
#if GOOGLE_PROTOBUF_VERSION < 3001000
#error This file was generated by a newer version of protoc which is
#error incompatible with your Protocol Buffer headers. Please update
#error your headers.
#endif
#if 3001000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION
#error This file was generated by an older version of protoc which is
#error incompatible with your Protocol Buffer headers. Please
#error regenerate this file with a newer version of protoc.
#endif
#include <google/protobuf/arena.h>
#include <google/protobuf/arenastring.h>
#include <google/protobuf/generated_message_util.h>
#include <google/protobuf/metadata.h>
#include <google/protobuf/message.h>
#include <google/protobuf/repeated_field.h>
#include <google/protobuf/extension_set.h>
#include <google/protobuf/unknown_field_set.h>
// @@protoc_insertion_point(includes)
namespace tensorflow {
// Internal implementation detail -- do not call these.
void protobuf_AddDesc_tensor_5fshape_2eproto();
void protobuf_InitDefaults_tensor_5fshape_2eproto();
void protobuf_AssignDesc_tensor_5fshape_2eproto();
void protobuf_ShutdownFile_tensor_5fshape_2eproto();
class TensorShapeProto;
class TensorShapeProto_Dim;
// ===================================================================
class TensorShapeProto_Dim : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:tensorflow.TensorShapeProto.Dim) */ {
public:
TensorShapeProto_Dim();
virtual ~TensorShapeProto_Dim();
TensorShapeProto_Dim(const TensorShapeProto_Dim& from);
inline TensorShapeProto_Dim& operator=(const TensorShapeProto_Dim& from) {
CopyFrom(from);
return *this;
}
inline ::google::protobuf::Arena* GetArena() const { return GetArenaNoVirtual(); }
inline void* GetMaybeArenaPointer() const {
return MaybeArenaPtr();
}
static const ::google::protobuf::Descriptor* descriptor();
static const TensorShapeProto_Dim& default_instance();
static const TensorShapeProto_Dim* internal_default_instance();
void UnsafeArenaSwap(TensorShapeProto_Dim* other);
void Swap(TensorShapeProto_Dim* other);
// implements Message ----------------------------------------------
inline TensorShapeProto_Dim* New() const { return New(NULL); }
TensorShapeProto_Dim* New(::google::protobuf::Arena* arena) const;
void CopyFrom(const ::google::protobuf::Message& from);
void MergeFrom(const ::google::protobuf::Message& from);
void CopyFrom(const TensorShapeProto_Dim& from);
void MergeFrom(const TensorShapeProto_Dim& from);
void Clear();
bool IsInitialized() const;
size_t ByteSizeLong() const;
bool MergePartialFromCodedStream(
::google::protobuf::io::CodedInputStream* input);
void SerializeWithCachedSizes(
::google::protobuf::io::CodedOutputStream* output) const;
::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(
bool deterministic, ::google::protobuf::uint8* output) const;
::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {
return InternalSerializeWithCachedSizesToArray(false, output);
}
int GetCachedSize() const { return _cached_size_; }
private:
void SharedCtor();
void SharedDtor();
void SetCachedSize(int size) const;
void InternalSwap(TensorShapeProto_Dim* other);
void UnsafeMergeFrom(const TensorShapeProto_Dim& from);
protected:
explicit TensorShapeProto_Dim(::google::protobuf::Arena* arena);
private:
static void ArenaDtor(void* object);
inline void RegisterArenaDtor(::google::protobuf::Arena* arena);
private:
inline ::google::protobuf::Arena* GetArenaNoVirtual() const {
return _internal_metadata_.arena();
}
inline void* MaybeArenaPtr() const {
return _internal_metadata_.raw_arena_ptr();
}
public:
::google::protobuf::Metadata GetMetadata() const;
// nested types ----------------------------------------------------
// accessors -------------------------------------------------------
// optional int64 size = 1;
void clear_size();
static const int kSizeFieldNumber = 1;
::google::protobuf::int64 size() const;
void set_size(::google::protobuf::int64 value);
// optional string name = 2;
void clear_name();
static const int kNameFieldNumber = 2;
const ::std::string& name() const;
void set_name(const ::std::string& value);
void set_name(const char* value);
void set_name(const char* value, size_t size);
::std::string* mutable_name();
::std::string* release_name();
void set_allocated_name(::std::string* name);
::std::string* unsafe_arena_release_name();
void unsafe_arena_set_allocated_name(
::std::string* name);
// @@protoc_insertion_point(class_scope:tensorflow.TensorShapeProto.Dim)
private:
::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;
friend class ::google::protobuf::Arena;
typedef void InternalArenaConstructable_;
typedef void DestructorSkippable_;
::google::protobuf::internal::ArenaStringPtr name_;
::google::protobuf::int64 size_;
mutable int _cached_size_;
friend void protobuf_InitDefaults_tensor_5fshape_2eproto_impl();
friend void protobuf_AddDesc_tensor_5fshape_2eproto_impl();
friend void protobuf_AssignDesc_tensor_5fshape_2eproto();
friend void protobuf_ShutdownFile_tensor_5fshape_2eproto();
void InitAsDefaultInstance();
};
extern ::google::protobuf::internal::ExplicitlyConstructed<TensorShapeProto_Dim> TensorShapeProto_Dim_default_instance_;
// -------------------------------------------------------------------
class TensorShapeProto : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:tensorflow.TensorShapeProto) */ {
public:
TensorShapeProto();
virtual ~TensorShapeProto();
TensorShapeProto(const TensorShapeProto& from);
inline TensorShapeProto& operator=(const TensorShapeProto& from) {
CopyFrom(from);
return *this;
}
inline ::google::protobuf::Arena* GetArena() const { return GetArenaNoVirtual(); }
inline void* GetMaybeArenaPointer() const {
return MaybeArenaPtr();
}
static const ::google::protobuf::Descriptor* descriptor();
static const TensorShapeProto& default_instance();
static const TensorShapeProto* internal_default_instance();
void UnsafeArenaSwap(TensorShapeProto* other);
void Swap(TensorShapeProto* other);
// implements Message ----------------------------------------------
inline TensorShapeProto* New() const { return New(NULL); }
TensorShapeProto* New(::google::protobuf::Arena* arena) const;
void CopyFrom(const ::google::protobuf::Message& from);
void MergeFrom(const ::google::protobuf::Message& from);
void CopyFrom(const TensorShapeProto& from);
void MergeFrom(const TensorShapeProto& from);
void Clear();
bool IsInitialized() const;
size_t ByteSizeLong() const;
bool MergePartialFromCodedStream(
::google::protobuf::io::CodedInputStream* input);
void SerializeWithCachedSizes(
::google::protobuf::io::CodedOutputStream* output) const;
::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(
bool deterministic, ::google::protobuf::uint8* output) const;
::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {
return InternalSerializeWithCachedSizesToArray(false, output);
}
int GetCachedSize() const { return _cached_size_; }
private:
void SharedCtor();
void SharedDtor();
void SetCachedSize(int size) const;
void InternalSwap(TensorShapeProto* other);
void UnsafeMergeFrom(const TensorShapeProto& from);
protected:
explicit TensorShapeProto(::google::protobuf::Arena* arena);
private:
static void ArenaDtor(void* object);
inline void RegisterArenaDtor(::google::protobuf::Arena* arena);
private:
inline ::google::protobuf::Arena* GetArenaNoVirtual() const {
return _internal_metadata_.arena();
}
inline void* MaybeArenaPtr() const {
return _internal_metadata_.raw_arena_ptr();
}
public:
::google::protobuf::Metadata GetMetadata() const;
// nested types ----------------------------------------------------
typedef TensorShapeProto_Dim Dim;
// accessors -------------------------------------------------------
// repeated .tensorflow.TensorShapeProto.Dim dim = 2;
int dim_size() const;
void clear_dim();
static const int kDimFieldNumber = 2;
const ::tensorflow::TensorShapeProto_Dim& dim(int index) const;
::tensorflow::TensorShapeProto_Dim* mutable_dim(int index);
::tensorflow::TensorShapeProto_Dim* add_dim();
::google::protobuf::RepeatedPtrField< ::tensorflow::TensorShapeProto_Dim >*
mutable_dim();
const ::google::protobuf::RepeatedPtrField< ::tensorflow::TensorShapeProto_Dim >&
dim() const;
// optional bool unknown_rank = 3;
void clear_unknown_rank();
static const int kUnknownRankFieldNumber = 3;
bool unknown_rank() const;
void set_unknown_rank(bool value);
// @@protoc_insertion_point(class_scope:tensorflow.TensorShapeProto)
private:
::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;
friend class ::google::protobuf::Arena;
typedef void InternalArenaConstructable_;
typedef void DestructorSkippable_;
::google::protobuf::RepeatedPtrField< ::tensorflow::TensorShapeProto_Dim > dim_;
bool unknown_rank_;
mutable int _cached_size_;
friend void protobuf_InitDefaults_tensor_5fshape_2eproto_impl();
friend void protobuf_AddDesc_tensor_5fshape_2eproto_impl();
friend void protobuf_AssignDesc_tensor_5fshape_2eproto();
friend void protobuf_ShutdownFile_tensor_5fshape_2eproto();
void InitAsDefaultInstance();
};
extern ::google::protobuf::internal::ExplicitlyConstructed<TensorShapeProto> TensorShapeProto_default_instance_;
// ===================================================================
// ===================================================================
#if !PROTOBUF_INLINE_NOT_IN_HEADERS
// TensorShapeProto_Dim
// optional int64 size = 1;
inline void TensorShapeProto_Dim::clear_size() {
size_ = GOOGLE_LONGLONG(0);
}
inline ::google::protobuf::int64 TensorShapeProto_Dim::size() const {
// @@protoc_insertion_point(field_get:tensorflow.TensorShapeProto.Dim.size)
return size_;
}
inline void TensorShapeProto_Dim::set_size(::google::protobuf::int64 value) {
size_ = value;
// @@protoc_insertion_point(field_set:tensorflow.TensorShapeProto.Dim.size)
}
// optional string name = 2;
inline void TensorShapeProto_Dim::clear_name() {
name_.ClearToEmpty(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());
}
inline const ::std::string& TensorShapeProto_Dim::name() const {
// @@protoc_insertion_point(field_get:tensorflow.TensorShapeProto.Dim.name)
return name_.Get(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
}
inline void TensorShapeProto_Dim::set_name(const ::std::string& value) {
name_.Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value, GetArenaNoVirtual());
// @@protoc_insertion_point(field_set:tensorflow.TensorShapeProto.Dim.name)
}
inline void TensorShapeProto_Dim::set_name(const char* value) {
name_.Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value),
GetArenaNoVirtual());
// @@protoc_insertion_point(field_set_char:tensorflow.TensorShapeProto.Dim.name)
}
inline void TensorShapeProto_Dim::set_name(const char* value,
size_t size) {
name_.Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(
reinterpret_cast<const char*>(value), size), GetArenaNoVirtual());
// @@protoc_insertion_point(field_set_pointer:tensorflow.TensorShapeProto.Dim.name)
}
inline ::std::string* TensorShapeProto_Dim::mutable_name() {
// @@protoc_insertion_point(field_mutable:tensorflow.TensorShapeProto.Dim.name)
return name_.Mutable(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());
}
inline ::std::string* TensorShapeProto_Dim::release_name() {
// @@protoc_insertion_point(field_release:tensorflow.TensorShapeProto.Dim.name)
return name_.Release(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());
}
inline ::std::string* TensorShapeProto_Dim::unsafe_arena_release_name() {
// @@protoc_insertion_point(field_unsafe_arena_release:tensorflow.TensorShapeProto.Dim.name)
GOOGLE_DCHECK(GetArenaNoVirtual() != NULL);
return name_.UnsafeArenaRelease(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
GetArenaNoVirtual());
}
inline void TensorShapeProto_Dim::set_allocated_name(::std::string* name) {
if (name != NULL) {
} else {
}
name_.SetAllocated(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), name,
GetArenaNoVirtual());
// @@protoc_insertion_point(field_set_allocated:tensorflow.TensorShapeProto.Dim.name)
}
inline void TensorShapeProto_Dim::unsafe_arena_set_allocated_name(
::std::string* name) {
GOOGLE_DCHECK(GetArenaNoVirtual() != NULL);
if (name != NULL) {
} else {
}
name_.UnsafeArenaSetAllocated(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
name, GetArenaNoVirtual());
// @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.TensorShapeProto.Dim.name)
}
inline const TensorShapeProto_Dim* TensorShapeProto_Dim::internal_default_instance() {
return &TensorShapeProto_Dim_default_instance_.get();
}
// -------------------------------------------------------------------
// TensorShapeProto
// repeated .tensorflow.TensorShapeProto.Dim dim = 2;
inline int TensorShapeProto::dim_size() const {
return dim_.size();
}
inline void TensorShapeProto::clear_dim() {
dim_.Clear();
}
inline const ::tensorflow::TensorShapeProto_Dim& TensorShapeProto::dim(int index) const {
// @@protoc_insertion_point(field_get:tensorflow.TensorShapeProto.dim)
return dim_.Get(index);
}
inline ::tensorflow::TensorShapeProto_Dim* TensorShapeProto::mutable_dim(int index) {
// @@protoc_insertion_point(field_mutable:tensorflow.TensorShapeProto.dim)
return dim_.Mutable(index);
}
inline ::tensorflow::TensorShapeProto_Dim* TensorShapeProto::add_dim() {
// @@protoc_insertion_point(field_add:tensorflow.TensorShapeProto.dim)
return dim_.Add();
}
inline ::google::protobuf::RepeatedPtrField< ::tensorflow::TensorShapeProto_Dim >*
TensorShapeProto::mutable_dim() {
// @@protoc_insertion_point(field_mutable_list:tensorflow.TensorShapeProto.dim)
return &dim_;
}
inline const ::google::protobuf::RepeatedPtrField< ::tensorflow::TensorShapeProto_Dim >&
TensorShapeProto::dim() const {
// @@protoc_insertion_point(field_list:tensorflow.TensorShapeProto.dim)
return dim_;
}
// optional bool unknown_rank = 3;
inline void TensorShapeProto::clear_unknown_rank() {
unknown_rank_ = false;
}
inline bool TensorShapeProto::unknown_rank() const {
// @@protoc_insertion_point(field_get:tensorflow.TensorShapeProto.unknown_rank)
return unknown_rank_;
}
inline void TensorShapeProto::set_unknown_rank(bool value) {
unknown_rank_ = value;
// @@protoc_insertion_point(field_set:tensorflow.TensorShapeProto.unknown_rank)
}
inline const TensorShapeProto* TensorShapeProto::internal_default_instance() {
return &TensorShapeProto_default_instance_.get();
}
#endif // !PROTOBUF_INLINE_NOT_IN_HEADERS
// -------------------------------------------------------------------
// @@protoc_insertion_point(namespace_scope)
} // namespace tensorflow
// @@protoc_insertion_point(global_scope)
#endif // PROTOBUF_tensor_5fshape_2eproto__INCLUDED
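The generated accessors above compose in the usual protobuf way: add_dim() appends to the repeated field, and the returned pointer exposes the Dim setters. A minimal usage sketch (not part of the module), assuming the generated headers and the full protobuf runtime are available:

#include <iostream>
#include "tensor_shape.pb.h"

int main()
{
    tensorflow::TensorShapeProto shape;
    tensorflow::TensorShapeProto_Dim* d = shape.add_dim();  // append a dimension
    d->set_size(224);
    d->set_name("height");
    shape.add_dim()->set_size(224);  // dims may also be chained inline
    shape.set_unknown_rank(false);   // the rank is known here: 2
    std::cout << shape.dim_size() << " dims: "
              << shape.ShortDebugString() << std::endl;
    return 0;
}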


@ -0,0 +1,163 @@
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: types.proto
#define INTERNAL_SUPPRESS_PROTOBUF_FIELD_DEPRECATION
#include "types.pb.h"
#include <algorithm>
#include <google/protobuf/stubs/common.h>
#include <google/protobuf/stubs/port.h>
#include <google/protobuf/stubs/once.h>
#include <google/protobuf/io/coded_stream.h>
#include <google/protobuf/wire_format_lite_inl.h>
#include <google/protobuf/descriptor.h>
#include <google/protobuf/generated_message_reflection.h>
#include <google/protobuf/reflection_ops.h>
#include <google/protobuf/wire_format.h>
// @@protoc_insertion_point(includes)
namespace tensorflow {
namespace {
const ::google::protobuf::EnumDescriptor* DataType_descriptor_ = NULL;
} // namespace
void protobuf_AssignDesc_types_2eproto() GOOGLE_ATTRIBUTE_COLD;
void protobuf_AssignDesc_types_2eproto() {
protobuf_AddDesc_types_2eproto();
const ::google::protobuf::FileDescriptor* file =
::google::protobuf::DescriptorPool::generated_pool()->FindFileByName(
"types.proto");
GOOGLE_CHECK(file != NULL);
DataType_descriptor_ = file->enum_type(0);
}
namespace {
GOOGLE_PROTOBUF_DECLARE_ONCE(protobuf_AssignDescriptors_once_);
void protobuf_AssignDescriptorsOnce() {
::google::protobuf::GoogleOnceInit(&protobuf_AssignDescriptors_once_,
&protobuf_AssignDesc_types_2eproto);
}
void protobuf_RegisterTypes(const ::std::string&) GOOGLE_ATTRIBUTE_COLD;
void protobuf_RegisterTypes(const ::std::string&) {
protobuf_AssignDescriptorsOnce();
}
} // namespace
void protobuf_ShutdownFile_types_2eproto() {
}
void protobuf_InitDefaults_types_2eproto_impl() {
GOOGLE_PROTOBUF_VERIFY_VERSION;
}
GOOGLE_PROTOBUF_DECLARE_ONCE(protobuf_InitDefaults_types_2eproto_once_);
void protobuf_InitDefaults_types_2eproto() {
::google::protobuf::GoogleOnceInit(&protobuf_InitDefaults_types_2eproto_once_,
&protobuf_InitDefaults_types_2eproto_impl);
}
void protobuf_AddDesc_types_2eproto_impl() {
GOOGLE_PROTOBUF_VERIFY_VERSION;
protobuf_InitDefaults_types_2eproto();
::google::protobuf::DescriptorPool::InternalAddGeneratedFile(
"\n\013types.proto\022\ntensorflow*\234\005\n\010DataType\022\016"
"\n\nDT_INVALID\020\000\022\014\n\010DT_FLOAT\020\001\022\r\n\tDT_DOUBL"
"E\020\002\022\014\n\010DT_INT32\020\003\022\014\n\010DT_UINT8\020\004\022\014\n\010DT_IN"
"T16\020\005\022\013\n\007DT_INT8\020\006\022\r\n\tDT_STRING\020\007\022\020\n\014DT_"
"COMPLEX64\020\010\022\014\n\010DT_INT64\020\t\022\013\n\007DT_BOOL\020\n\022\014"
"\n\010DT_QINT8\020\013\022\r\n\tDT_QUINT8\020\014\022\r\n\tDT_QINT32"
"\020\r\022\017\n\013DT_BFLOAT16\020\016\022\r\n\tDT_QINT16\020\017\022\016\n\nDT"
"_QUINT16\020\020\022\r\n\tDT_UINT16\020\021\022\021\n\rDT_COMPLEX1"
"28\020\022\022\013\n\007DT_HALF\020\023\022\020\n\014DT_FLOAT_REF\020e\022\021\n\rD"
"T_DOUBLE_REF\020f\022\020\n\014DT_INT32_REF\020g\022\020\n\014DT_U"
"INT8_REF\020h\022\020\n\014DT_INT16_REF\020i\022\017\n\013DT_INT8_"
"REF\020j\022\021\n\rDT_STRING_REF\020k\022\024\n\020DT_COMPLEX64"
"_REF\020l\022\020\n\014DT_INT64_REF\020m\022\017\n\013DT_BOOL_REF\020"
"n\022\020\n\014DT_QINT8_REF\020o\022\021\n\rDT_QUINT8_REF\020p\022\021"
"\n\rDT_QINT32_REF\020q\022\023\n\017DT_BFLOAT16_REF\020r\022\021"
"\n\rDT_QINT16_REF\020s\022\022\n\016DT_QUINT16_REF\020t\022\021\n"
"\rDT_UINT16_REF\020u\022\025\n\021DT_COMPLEX128_REF\020v\022"
"\017\n\013DT_HALF_REF\020wB,\n\030org.tensorflow.frame"
"workB\013TypesProtosP\001\370\001\001b\006proto3", 750);
::google::protobuf::MessageFactory::InternalRegisterGeneratedFile(
"types.proto", &protobuf_RegisterTypes);
::google::protobuf::internal::OnShutdown(&protobuf_ShutdownFile_types_2eproto);
}
GOOGLE_PROTOBUF_DECLARE_ONCE(protobuf_AddDesc_types_2eproto_once_);
void protobuf_AddDesc_types_2eproto() {
::google::protobuf::GoogleOnceInit(&protobuf_AddDesc_types_2eproto_once_,
&protobuf_AddDesc_types_2eproto_impl);
}
// Force AddDescriptors() to be called at static initialization time.
struct StaticDescriptorInitializer_types_2eproto {
StaticDescriptorInitializer_types_2eproto() {
protobuf_AddDesc_types_2eproto();
}
} static_descriptor_initializer_types_2eproto_;
const ::google::protobuf::EnumDescriptor* DataType_descriptor() {
protobuf_AssignDescriptorsOnce();
return DataType_descriptor_;
}
bool DataType_IsValid(int value) {
switch (value) {
case 0:
case 1:
case 2:
case 3:
case 4:
case 5:
case 6:
case 7:
case 8:
case 9:
case 10:
case 11:
case 12:
case 13:
case 14:
case 15:
case 16:
case 17:
case 18:
case 19:
case 101:
case 102:
case 103:
case 104:
case 105:
case 106:
case 107:
case 108:
case 109:
case 110:
case 111:
case 112:
case 113:
case 114:
case 115:
case 116:
case 117:
case 118:
case 119:
return true;
default:
return false;
}
}
// @@protoc_insertion_point(namespace_scope)
} // namespace tensorflow
// @@protoc_insertion_point(global_scope)
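The 750-byte string passed to InternalAddGeneratedFile() above is the serialized FileDescriptorProto for types.proto; once registered, the enum can be inspected through the descriptor API. A hedged sketch of walking its values (assumes the full protobuf runtime, not protobuf-lite):

#include <iostream>
#include "types.pb.h"

int main()
{
    const ::google::protobuf::EnumDescriptor* d = tensorflow::DataType_descriptor();
    for (int i = 0; i < d->value_count(); ++i)
        std::cout << d->value(i)->name() << " = "
                  << d->value(i)->number() << std::endl;
    return 0;  // prints DT_INVALID = 0 ... DT_HALF_REF = 119
}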


@ -0,0 +1,129 @@
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: types.proto
#ifndef PROTOBUF_types_2eproto__INCLUDED
#define PROTOBUF_types_2eproto__INCLUDED
#include <string>
#include <google/protobuf/stubs/common.h>
#if GOOGLE_PROTOBUF_VERSION < 3001000
#error This file was generated by a newer version of protoc which is
#error incompatible with your Protocol Buffer headers. Please update
#error your headers.
#endif
#if 3001000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION
#error This file was generated by an older version of protoc which is
#error incompatible with your Protocol Buffer headers. Please
#error regenerate this file with a newer version of protoc.
#endif
#include <google/protobuf/arena.h>
#include <google/protobuf/arenastring.h>
#include <google/protobuf/generated_message_util.h>
#include <google/protobuf/metadata.h>
#include <google/protobuf/repeated_field.h>
#include <google/protobuf/extension_set.h>
#include <google/protobuf/generated_enum_reflection.h>
// @@protoc_insertion_point(includes)
namespace tensorflow {
// Internal implementation detail -- do not call these.
void protobuf_AddDesc_types_2eproto();
void protobuf_InitDefaults_types_2eproto();
void protobuf_AssignDesc_types_2eproto();
void protobuf_ShutdownFile_types_2eproto();
enum DataType {
DT_INVALID = 0,
DT_FLOAT = 1,
DT_DOUBLE = 2,
DT_INT32 = 3,
DT_UINT8 = 4,
DT_INT16 = 5,
DT_INT8 = 6,
DT_STRING = 7,
DT_COMPLEX64 = 8,
DT_INT64 = 9,
DT_BOOL = 10,
DT_QINT8 = 11,
DT_QUINT8 = 12,
DT_QINT32 = 13,
DT_BFLOAT16 = 14,
DT_QINT16 = 15,
DT_QUINT16 = 16,
DT_UINT16 = 17,
DT_COMPLEX128 = 18,
DT_HALF = 19,
DT_FLOAT_REF = 101,
DT_DOUBLE_REF = 102,
DT_INT32_REF = 103,
DT_UINT8_REF = 104,
DT_INT16_REF = 105,
DT_INT8_REF = 106,
DT_STRING_REF = 107,
DT_COMPLEX64_REF = 108,
DT_INT64_REF = 109,
DT_BOOL_REF = 110,
DT_QINT8_REF = 111,
DT_QUINT8_REF = 112,
DT_QINT32_REF = 113,
DT_BFLOAT16_REF = 114,
DT_QINT16_REF = 115,
DT_QUINT16_REF = 116,
DT_UINT16_REF = 117,
DT_COMPLEX128_REF = 118,
DT_HALF_REF = 119,
DataType_INT_MIN_SENTINEL_DO_NOT_USE_ = ::google::protobuf::kint32min,
DataType_INT_MAX_SENTINEL_DO_NOT_USE_ = ::google::protobuf::kint32max
};
bool DataType_IsValid(int value);
const DataType DataType_MIN = DT_INVALID;
const DataType DataType_MAX = DT_HALF_REF;
const int DataType_ARRAYSIZE = DataType_MAX + 1;
const ::google::protobuf::EnumDescriptor* DataType_descriptor();
inline const ::std::string& DataType_Name(DataType value) {
return ::google::protobuf::internal::NameOfEnum(
DataType_descriptor(), value);
}
inline bool DataType_Parse(
const ::std::string& name, DataType* value) {
return ::google::protobuf::internal::ParseNamedEnum<DataType>(
DataType_descriptor(), name, value);
}
// ===================================================================
// ===================================================================
// ===================================================================
#if !PROTOBUF_INLINE_NOT_IN_HEADERS
#endif // !PROTOBUF_INLINE_NOT_IN_HEADERS
// @@protoc_insertion_point(namespace_scope)
} // namespace tensorflow
#ifndef SWIG
namespace google {
namespace protobuf {
template <> struct is_proto_enum< ::tensorflow::DataType> : ::google::protobuf::internal::true_type {};
template <>
inline const EnumDescriptor* GetEnumDescriptor< ::tensorflow::DataType>() {
return ::tensorflow::DataType_descriptor();
}
} // namespace protobuf
} // namespace google
#endif // SWIG
// @@protoc_insertion_point(global_scope)
#endif // PROTOBUF_types_2eproto__INCLUDED
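Two details worth noting in this header: every DT_*_REF constant is its base value plus 100 (which is why DataType_IsValid() in types.pb.cc accepts exactly 0-19 and 101-119), and the free helpers convert between names and values. A minimal sketch:

#include <iostream>
#include "types.pb.h"

int main()
{
    using namespace tensorflow;
    std::cout << DataType_Name(DT_FLOAT) << std::endl;   // "DT_FLOAT"
    DataType parsed;
    if (DataType_Parse("DT_INT32", &parsed))
        std::cout << "parsed: " << parsed << std::endl;  // 3
    std::cout << (DT_FLOAT_REF == DT_FLOAT + 100) << std::endl;  // 1
    return 0;
}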


@ -0,0 +1,572 @@
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: versions.proto
#define INTERNAL_SUPPRESS_PROTOBUF_FIELD_DEPRECATION
#include "versions.pb.h"
#include <algorithm>
#include <google/protobuf/stubs/common.h>
#include <google/protobuf/stubs/port.h>
#include <google/protobuf/stubs/once.h>
#include <google/protobuf/io/coded_stream.h>
#include <google/protobuf/wire_format_lite_inl.h>
#include <google/protobuf/descriptor.h>
#include <google/protobuf/generated_message_reflection.h>
#include <google/protobuf/reflection_ops.h>
#include <google/protobuf/wire_format.h>
// @@protoc_insertion_point(includes)
namespace tensorflow {
namespace {
const ::google::protobuf::Descriptor* VersionDef_descriptor_ = NULL;
const ::google::protobuf::internal::GeneratedMessageReflection*
VersionDef_reflection_ = NULL;
} // namespace
void protobuf_AssignDesc_versions_2eproto() GOOGLE_ATTRIBUTE_COLD;
void protobuf_AssignDesc_versions_2eproto() {
protobuf_AddDesc_versions_2eproto();
const ::google::protobuf::FileDescriptor* file =
::google::protobuf::DescriptorPool::generated_pool()->FindFileByName(
"versions.proto");
GOOGLE_CHECK(file != NULL);
VersionDef_descriptor_ = file->message_type(0);
static const int VersionDef_offsets_[3] = {
GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(VersionDef, producer_),
GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(VersionDef, min_consumer_),
GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(VersionDef, bad_consumers_),
};
VersionDef_reflection_ =
::google::protobuf::internal::GeneratedMessageReflection::NewGeneratedMessageReflection(
VersionDef_descriptor_,
VersionDef::internal_default_instance(),
VersionDef_offsets_,
-1,
-1,
-1,
sizeof(VersionDef),
GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(VersionDef, _internal_metadata_));
}
namespace {
GOOGLE_PROTOBUF_DECLARE_ONCE(protobuf_AssignDescriptors_once_);
void protobuf_AssignDescriptorsOnce() {
::google::protobuf::GoogleOnceInit(&protobuf_AssignDescriptors_once_,
&protobuf_AssignDesc_versions_2eproto);
}
void protobuf_RegisterTypes(const ::std::string&) GOOGLE_ATTRIBUTE_COLD;
void protobuf_RegisterTypes(const ::std::string&) {
protobuf_AssignDescriptorsOnce();
::google::protobuf::MessageFactory::InternalRegisterGeneratedMessage(
VersionDef_descriptor_, VersionDef::internal_default_instance());
}
} // namespace
void protobuf_ShutdownFile_versions_2eproto() {
VersionDef_default_instance_.Shutdown();
delete VersionDef_reflection_;
}
void protobuf_InitDefaults_versions_2eproto_impl() {
GOOGLE_PROTOBUF_VERIFY_VERSION;
VersionDef_default_instance_.DefaultConstruct();
VersionDef_default_instance_.get_mutable()->InitAsDefaultInstance();
}
GOOGLE_PROTOBUF_DECLARE_ONCE(protobuf_InitDefaults_versions_2eproto_once_);
void protobuf_InitDefaults_versions_2eproto() {
::google::protobuf::GoogleOnceInit(&protobuf_InitDefaults_versions_2eproto_once_,
&protobuf_InitDefaults_versions_2eproto_impl);
}
void protobuf_AddDesc_versions_2eproto_impl() {
GOOGLE_PROTOBUF_VERIFY_VERSION;
protobuf_InitDefaults_versions_2eproto();
::google::protobuf::DescriptorPool::InternalAddGeneratedFile(
"\n\016versions.proto\022\ntensorflow\"K\n\nVersionD"
"ef\022\020\n\010producer\030\001 \001(\005\022\024\n\014min_consumer\030\002 \001"
"(\005\022\025\n\rbad_consumers\030\003 \003(\005B/\n\030org.tensorf"
"low.frameworkB\016VersionsProtosP\001\370\001\001b\006prot"
"o3", 162);
::google::protobuf::MessageFactory::InternalRegisterGeneratedFile(
"versions.proto", &protobuf_RegisterTypes);
::google::protobuf::internal::OnShutdown(&protobuf_ShutdownFile_versions_2eproto);
}
GOOGLE_PROTOBUF_DECLARE_ONCE(protobuf_AddDesc_versions_2eproto_once_);
void protobuf_AddDesc_versions_2eproto() {
::google::protobuf::GoogleOnceInit(&protobuf_AddDesc_versions_2eproto_once_,
&protobuf_AddDesc_versions_2eproto_impl);
}
// Force AddDescriptors() to be called at static initialization time.
struct StaticDescriptorInitializer_versions_2eproto {
StaticDescriptorInitializer_versions_2eproto() {
protobuf_AddDesc_versions_2eproto();
}
} static_descriptor_initializer_versions_2eproto_;
namespace {
static void MergeFromFail(int line) GOOGLE_ATTRIBUTE_COLD GOOGLE_ATTRIBUTE_NORETURN;
static void MergeFromFail(int line) {
::google::protobuf::internal::MergeFromFail(__FILE__, line);
}
} // namespace
// ===================================================================
#if !defined(_MSC_VER) || _MSC_VER >= 1900
const int VersionDef::kProducerFieldNumber;
const int VersionDef::kMinConsumerFieldNumber;
const int VersionDef::kBadConsumersFieldNumber;
#endif // !defined(_MSC_VER) || _MSC_VER >= 1900
VersionDef::VersionDef()
: ::google::protobuf::Message(), _internal_metadata_(NULL) {
if (this != internal_default_instance()) protobuf_InitDefaults_versions_2eproto();
SharedCtor();
// @@protoc_insertion_point(constructor:tensorflow.VersionDef)
}
VersionDef::VersionDef(::google::protobuf::Arena* arena)
: ::google::protobuf::Message(),
_internal_metadata_(arena),
bad_consumers_(arena) {
#ifdef GOOGLE_PROTOBUF_NO_STATIC_INITIALIZER
protobuf_InitDefaults_versions_2eproto();
#endif // GOOGLE_PROTOBUF_NO_STATIC_INITIALIZER
SharedCtor();
RegisterArenaDtor(arena);
// @@protoc_insertion_point(arena_constructor:tensorflow.VersionDef)
}
void VersionDef::InitAsDefaultInstance() {
}
VersionDef::VersionDef(const VersionDef& from)
: ::google::protobuf::Message(),
_internal_metadata_(NULL) {
SharedCtor();
UnsafeMergeFrom(from);
// @@protoc_insertion_point(copy_constructor:tensorflow.VersionDef)
}
void VersionDef::SharedCtor() {
::memset(&producer_, 0, reinterpret_cast<char*>(&min_consumer_) -
reinterpret_cast<char*>(&producer_) + sizeof(min_consumer_));
_cached_size_ = 0;
}
VersionDef::~VersionDef() {
// @@protoc_insertion_point(destructor:tensorflow.VersionDef)
SharedDtor();
}
void VersionDef::SharedDtor() {
::google::protobuf::Arena* arena = GetArenaNoVirtual();
if (arena != NULL) {
return;
}
}
void VersionDef::ArenaDtor(void* object) {
VersionDef* _this = reinterpret_cast< VersionDef* >(object);
(void)_this;
}
void VersionDef::RegisterArenaDtor(::google::protobuf::Arena* arena) {
}
void VersionDef::SetCachedSize(int size) const {
GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
_cached_size_ = size;
GOOGLE_SAFE_CONCURRENT_WRITES_END();
}
const ::google::protobuf::Descriptor* VersionDef::descriptor() {
protobuf_AssignDescriptorsOnce();
return VersionDef_descriptor_;
}
const VersionDef& VersionDef::default_instance() {
protobuf_InitDefaults_versions_2eproto();
return *internal_default_instance();
}
::google::protobuf::internal::ExplicitlyConstructed<VersionDef> VersionDef_default_instance_;
VersionDef* VersionDef::New(::google::protobuf::Arena* arena) const {
return ::google::protobuf::Arena::CreateMessage<VersionDef>(arena);
}
void VersionDef::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.VersionDef)
#if defined(__clang__)
#define ZR_HELPER_(f) \
_Pragma("clang diagnostic push") \
_Pragma("clang diagnostic ignored \"-Winvalid-offsetof\"") \
__builtin_offsetof(VersionDef, f) \
_Pragma("clang diagnostic pop")
#else
#define ZR_HELPER_(f) reinterpret_cast<char*>(\
&reinterpret_cast<VersionDef*>(16)->f)
#endif
#define ZR_(first, last) do {\
::memset(&(first), 0,\
ZR_HELPER_(last) - ZR_HELPER_(first) + sizeof(last));\
} while (0)
ZR_(producer_, min_consumer_);
#undef ZR_HELPER_
#undef ZR_
bad_consumers_.Clear();
}
bool VersionDef::MergePartialFromCodedStream(
::google::protobuf::io::CodedInputStream* input) {
#define DO_(EXPRESSION) if (!GOOGLE_PREDICT_TRUE(EXPRESSION)) goto failure
::google::protobuf::uint32 tag;
// @@protoc_insertion_point(parse_start:tensorflow.VersionDef)
for (;;) {
::std::pair< ::google::protobuf::uint32, bool> p = input->ReadTagWithCutoff(127);
tag = p.first;
if (!p.second) goto handle_unusual;
switch (::google::protobuf::internal::WireFormatLite::GetTagFieldNumber(tag)) {
// optional int32 producer = 1;
case 1: {
if (tag == 8) {
DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
::google::protobuf::int32, ::google::protobuf::internal::WireFormatLite::TYPE_INT32>(
input, &producer_)));
} else {
goto handle_unusual;
}
if (input->ExpectTag(16)) goto parse_min_consumer;
break;
}
// optional int32 min_consumer = 2;
case 2: {
if (tag == 16) {
parse_min_consumer:
DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
::google::protobuf::int32, ::google::protobuf::internal::WireFormatLite::TYPE_INT32>(
input, &min_consumer_)));
} else {
goto handle_unusual;
}
if (input->ExpectTag(26)) goto parse_bad_consumers;
break;
}
// repeated int32 bad_consumers = 3;
case 3: {
if (tag == 26) {
parse_bad_consumers:
DO_((::google::protobuf::internal::WireFormatLite::ReadPackedPrimitive<
::google::protobuf::int32, ::google::protobuf::internal::WireFormatLite::TYPE_INT32>(
input, this->mutable_bad_consumers())));
} else if (tag == 24) {
DO_((::google::protobuf::internal::WireFormatLite::ReadRepeatedPrimitiveNoInline<
::google::protobuf::int32, ::google::protobuf::internal::WireFormatLite::TYPE_INT32>(
1, 26, input, this->mutable_bad_consumers())));
} else {
goto handle_unusual;
}
if (input->ExpectAtEnd()) goto success;
break;
}
default: {
handle_unusual:
if (tag == 0 ||
::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
::google::protobuf::internal::WireFormatLite::WIRETYPE_END_GROUP) {
goto success;
}
DO_(::google::protobuf::internal::WireFormatLite::SkipField(input, tag));
break;
}
}
}
success:
// @@protoc_insertion_point(parse_success:tensorflow.VersionDef)
return true;
failure:
// @@protoc_insertion_point(parse_failure:tensorflow.VersionDef)
return false;
#undef DO_
}
void VersionDef::SerializeWithCachedSizes(
::google::protobuf::io::CodedOutputStream* output) const {
// @@protoc_insertion_point(serialize_start:tensorflow.VersionDef)
// optional int32 producer = 1;
if (this->producer() != 0) {
::google::protobuf::internal::WireFormatLite::WriteInt32(1, this->producer(), output);
}
// optional int32 min_consumer = 2;
if (this->min_consumer() != 0) {
::google::protobuf::internal::WireFormatLite::WriteInt32(2, this->min_consumer(), output);
}
// repeated int32 bad_consumers = 3;
if (this->bad_consumers_size() > 0) {
::google::protobuf::internal::WireFormatLite::WriteTag(3, ::google::protobuf::internal::WireFormatLite::WIRETYPE_LENGTH_DELIMITED, output);
output->WriteVarint32(_bad_consumers_cached_byte_size_);
}
for (int i = 0; i < this->bad_consumers_size(); i++) {
::google::protobuf::internal::WireFormatLite::WriteInt32NoTag(
this->bad_consumers(i), output);
}
// @@protoc_insertion_point(serialize_end:tensorflow.VersionDef)
}
::google::protobuf::uint8* VersionDef::InternalSerializeWithCachedSizesToArray(
bool deterministic, ::google::protobuf::uint8* target) const {
(void)deterministic; // Unused
// @@protoc_insertion_point(serialize_to_array_start:tensorflow.VersionDef)
// optional int32 producer = 1;
if (this->producer() != 0) {
target = ::google::protobuf::internal::WireFormatLite::WriteInt32ToArray(1, this->producer(), target);
}
// optional int32 min_consumer = 2;
if (this->min_consumer() != 0) {
target = ::google::protobuf::internal::WireFormatLite::WriteInt32ToArray(2, this->min_consumer(), target);
}
// repeated int32 bad_consumers = 3;
if (this->bad_consumers_size() > 0) {
target = ::google::protobuf::internal::WireFormatLite::WriteTagToArray(
3,
::google::protobuf::internal::WireFormatLite::WIRETYPE_LENGTH_DELIMITED,
target);
target = ::google::protobuf::io::CodedOutputStream::WriteVarint32ToArray(
_bad_consumers_cached_byte_size_, target);
}
for (int i = 0; i < this->bad_consumers_size(); i++) {
target = ::google::protobuf::internal::WireFormatLite::
WriteInt32NoTagToArray(this->bad_consumers(i), target);
}
// @@protoc_insertion_point(serialize_to_array_end:tensorflow.VersionDef)
return target;
}
size_t VersionDef::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.VersionDef)
size_t total_size = 0;
// optional int32 producer = 1;
if (this->producer() != 0) {
total_size += 1 +
::google::protobuf::internal::WireFormatLite::Int32Size(
this->producer());
}
// optional int32 min_consumer = 2;
if (this->min_consumer() != 0) {
total_size += 1 +
::google::protobuf::internal::WireFormatLite::Int32Size(
this->min_consumer());
}
// repeated int32 bad_consumers = 3;
{
size_t data_size = 0;
unsigned int count = this->bad_consumers_size();
for (unsigned int i = 0; i < count; i++) {
data_size += ::google::protobuf::internal::WireFormatLite::
Int32Size(this->bad_consumers(i));
}
if (data_size > 0) {
total_size += 1 +
::google::protobuf::internal::WireFormatLite::Int32Size(data_size);
}
int cached_size = ::google::protobuf::internal::ToCachedSize(data_size);
GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
_bad_consumers_cached_byte_size_ = cached_size;
GOOGLE_SAFE_CONCURRENT_WRITES_END();
total_size += data_size;
}
int cached_size = ::google::protobuf::internal::ToCachedSize(total_size);
GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
_cached_size_ = cached_size;
GOOGLE_SAFE_CONCURRENT_WRITES_END();
return total_size;
}
void VersionDef::MergeFrom(const ::google::protobuf::Message& from) {
// @@protoc_insertion_point(generalized_merge_from_start:tensorflow.VersionDef)
if (GOOGLE_PREDICT_FALSE(&from == this)) MergeFromFail(__LINE__);
const VersionDef* source =
::google::protobuf::internal::DynamicCastToGenerated<const VersionDef>(
&from);
if (source == NULL) {
// @@protoc_insertion_point(generalized_merge_from_cast_fail:tensorflow.VersionDef)
::google::protobuf::internal::ReflectionOps::Merge(from, this);
} else {
// @@protoc_insertion_point(generalized_merge_from_cast_success:tensorflow.VersionDef)
UnsafeMergeFrom(*source);
}
}
void VersionDef::MergeFrom(const VersionDef& from) {
// @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.VersionDef)
if (GOOGLE_PREDICT_TRUE(&from != this)) {
UnsafeMergeFrom(from);
} else {
MergeFromFail(__LINE__);
}
}
void VersionDef::UnsafeMergeFrom(const VersionDef& from) {
GOOGLE_DCHECK(&from != this);
bad_consumers_.UnsafeMergeFrom(from.bad_consumers_);
if (from.producer() != 0) {
set_producer(from.producer());
}
if (from.min_consumer() != 0) {
set_min_consumer(from.min_consumer());
}
}
void VersionDef::CopyFrom(const ::google::protobuf::Message& from) {
// @@protoc_insertion_point(generalized_copy_from_start:tensorflow.VersionDef)
if (&from == this) return;
Clear();
MergeFrom(from);
}
void VersionDef::CopyFrom(const VersionDef& from) {
// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.VersionDef)
if (&from == this) return;
Clear();
UnsafeMergeFrom(from);
}
bool VersionDef::IsInitialized() const {
return true;
}
void VersionDef::Swap(VersionDef* other) {
if (other == this) return;
if (GetArenaNoVirtual() == other->GetArenaNoVirtual()) {
InternalSwap(other);
} else {
VersionDef temp;
temp.UnsafeMergeFrom(*this);
CopyFrom(*other);
other->CopyFrom(temp);
}
}
void VersionDef::UnsafeArenaSwap(VersionDef* other) {
if (other == this) return;
GOOGLE_DCHECK(GetArenaNoVirtual() == other->GetArenaNoVirtual());
InternalSwap(other);
}
void VersionDef::InternalSwap(VersionDef* other) {
std::swap(producer_, other->producer_);
std::swap(min_consumer_, other->min_consumer_);
bad_consumers_.UnsafeArenaSwap(&other->bad_consumers_);
_internal_metadata_.Swap(&other->_internal_metadata_);
std::swap(_cached_size_, other->_cached_size_);
}
::google::protobuf::Metadata VersionDef::GetMetadata() const {
protobuf_AssignDescriptorsOnce();
::google::protobuf::Metadata metadata;
metadata.descriptor = VersionDef_descriptor_;
metadata.reflection = VersionDef_reflection_;
return metadata;
}
#if PROTOBUF_INLINE_NOT_IN_HEADERS
// VersionDef
// optional int32 producer = 1;
void VersionDef::clear_producer() {
producer_ = 0;
}
::google::protobuf::int32 VersionDef::producer() const {
// @@protoc_insertion_point(field_get:tensorflow.VersionDef.producer)
return producer_;
}
void VersionDef::set_producer(::google::protobuf::int32 value) {
producer_ = value;
// @@protoc_insertion_point(field_set:tensorflow.VersionDef.producer)
}
// optional int32 min_consumer = 2;
void VersionDef::clear_min_consumer() {
min_consumer_ = 0;
}
::google::protobuf::int32 VersionDef::min_consumer() const {
// @@protoc_insertion_point(field_get:tensorflow.VersionDef.min_consumer)
return min_consumer_;
}
void VersionDef::set_min_consumer(::google::protobuf::int32 value) {
min_consumer_ = value;
// @@protoc_insertion_point(field_set:tensorflow.VersionDef.min_consumer)
}
// repeated int32 bad_consumers = 3;
int VersionDef::bad_consumers_size() const {
return bad_consumers_.size();
}
void VersionDef::clear_bad_consumers() {
bad_consumers_.Clear();
}
::google::protobuf::int32 VersionDef::bad_consumers(int index) const {
// @@protoc_insertion_point(field_get:tensorflow.VersionDef.bad_consumers)
return bad_consumers_.Get(index);
}
void VersionDef::set_bad_consumers(int index, ::google::protobuf::int32 value) {
bad_consumers_.Set(index, value);
// @@protoc_insertion_point(field_set:tensorflow.VersionDef.bad_consumers)
}
void VersionDef::add_bad_consumers(::google::protobuf::int32 value) {
bad_consumers_.Add(value);
// @@protoc_insertion_point(field_add:tensorflow.VersionDef.bad_consumers)
}
const ::google::protobuf::RepeatedField< ::google::protobuf::int32 >&
VersionDef::bad_consumers() const {
// @@protoc_insertion_point(field_list:tensorflow.VersionDef.bad_consumers)
return bad_consumers_;
}
::google::protobuf::RepeatedField< ::google::protobuf::int32 >*
VersionDef::mutable_bad_consumers() {
// @@protoc_insertion_point(field_mutable_list:tensorflow.VersionDef.bad_consumers)
return &bad_consumers_;
}
inline const VersionDef* VersionDef::internal_default_instance() {
return &VersionDef_default_instance_.get();
}
#endif // PROTOBUF_INLINE_NOT_IN_HEADERS
// @@protoc_insertion_point(namespace_scope)
} // namespace tensorflow
// @@protoc_insertion_point(global_scope)
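The literal tags tested in MergePartialFromCodedStream() above (8, 16, 26, 24) follow the protobuf wire format, tag = (field_number << 3) | wire_type, where wire type 0 is a varint and 2 is length-delimited. A quick compile-time check (a C++11 illustration, not part of the generated file):

static_assert((1 << 3 | 0) == 8,  "producer: field 1, varint");
static_assert((2 << 3 | 0) == 16, "min_consumer: field 2, varint");
static_assert((3 << 3 | 2) == 26, "bad_consumers: field 3, packed (length-delimited)");
static_assert((3 << 3 | 0) == 24, "bad_consumers: unpacked varint fallback");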


@ -0,0 +1,239 @@
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: versions.proto
#ifndef PROTOBUF_versions_2eproto__INCLUDED
#define PROTOBUF_versions_2eproto__INCLUDED
#include <string>
#include <google/protobuf/stubs/common.h>
#if GOOGLE_PROTOBUF_VERSION < 3001000
#error This file was generated by a newer version of protoc which is
#error incompatible with your Protocol Buffer headers. Please update
#error your headers.
#endif
#if 3001000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION
#error This file was generated by an older version of protoc which is
#error incompatible with your Protocol Buffer headers. Please
#error regenerate this file with a newer version of protoc.
#endif
#include <google/protobuf/arena.h>
#include <google/protobuf/arenastring.h>
#include <google/protobuf/generated_message_util.h>
#include <google/protobuf/metadata.h>
#include <google/protobuf/message.h>
#include <google/protobuf/repeated_field.h>
#include <google/protobuf/extension_set.h>
#include <google/protobuf/unknown_field_set.h>
// @@protoc_insertion_point(includes)
namespace tensorflow {
// Internal implementation detail -- do not call these.
void protobuf_AddDesc_versions_2eproto();
void protobuf_InitDefaults_versions_2eproto();
void protobuf_AssignDesc_versions_2eproto();
void protobuf_ShutdownFile_versions_2eproto();
class VersionDef;
// ===================================================================
class VersionDef : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:tensorflow.VersionDef) */ {
public:
VersionDef();
virtual ~VersionDef();
VersionDef(const VersionDef& from);
inline VersionDef& operator=(const VersionDef& from) {
CopyFrom(from);
return *this;
}
inline ::google::protobuf::Arena* GetArena() const { return GetArenaNoVirtual(); }
inline void* GetMaybeArenaPointer() const {
return MaybeArenaPtr();
}
static const ::google::protobuf::Descriptor* descriptor();
static const VersionDef& default_instance();
static const VersionDef* internal_default_instance();
void UnsafeArenaSwap(VersionDef* other);
void Swap(VersionDef* other);
// implements Message ----------------------------------------------
inline VersionDef* New() const { return New(NULL); }
VersionDef* New(::google::protobuf::Arena* arena) const;
void CopyFrom(const ::google::protobuf::Message& from);
void MergeFrom(const ::google::protobuf::Message& from);
void CopyFrom(const VersionDef& from);
void MergeFrom(const VersionDef& from);
void Clear();
bool IsInitialized() const;
size_t ByteSizeLong() const;
bool MergePartialFromCodedStream(
::google::protobuf::io::CodedInputStream* input);
void SerializeWithCachedSizes(
::google::protobuf::io::CodedOutputStream* output) const;
::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(
bool deterministic, ::google::protobuf::uint8* output) const;
::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {
return InternalSerializeWithCachedSizesToArray(false, output);
}
int GetCachedSize() const { return _cached_size_; }
private:
void SharedCtor();
void SharedDtor();
void SetCachedSize(int size) const;
void InternalSwap(VersionDef* other);
void UnsafeMergeFrom(const VersionDef& from);
protected:
explicit VersionDef(::google::protobuf::Arena* arena);
private:
static void ArenaDtor(void* object);
inline void RegisterArenaDtor(::google::protobuf::Arena* arena);
private:
inline ::google::protobuf::Arena* GetArenaNoVirtual() const {
return _internal_metadata_.arena();
}
inline void* MaybeArenaPtr() const {
return _internal_metadata_.raw_arena_ptr();
}
public:
::google::protobuf::Metadata GetMetadata() const;
// nested types ----------------------------------------------------
// accessors -------------------------------------------------------
// optional int32 producer = 1;
void clear_producer();
static const int kProducerFieldNumber = 1;
::google::protobuf::int32 producer() const;
void set_producer(::google::protobuf::int32 value);
// optional int32 min_consumer = 2;
void clear_min_consumer();
static const int kMinConsumerFieldNumber = 2;
::google::protobuf::int32 min_consumer() const;
void set_min_consumer(::google::protobuf::int32 value);
// repeated int32 bad_consumers = 3;
int bad_consumers_size() const;
void clear_bad_consumers();
static const int kBadConsumersFieldNumber = 3;
::google::protobuf::int32 bad_consumers(int index) const;
void set_bad_consumers(int index, ::google::protobuf::int32 value);
void add_bad_consumers(::google::protobuf::int32 value);
const ::google::protobuf::RepeatedField< ::google::protobuf::int32 >&
bad_consumers() const;
::google::protobuf::RepeatedField< ::google::protobuf::int32 >*
mutable_bad_consumers();
// @@protoc_insertion_point(class_scope:tensorflow.VersionDef)
private:
::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;
friend class ::google::protobuf::Arena;
typedef void InternalArenaConstructable_;
typedef void DestructorSkippable_;
::google::protobuf::RepeatedField< ::google::protobuf::int32 > bad_consumers_;
mutable int _bad_consumers_cached_byte_size_;
::google::protobuf::int32 producer_;
::google::protobuf::int32 min_consumer_;
mutable int _cached_size_;
friend void protobuf_InitDefaults_versions_2eproto_impl();
friend void protobuf_AddDesc_versions_2eproto_impl();
friend void protobuf_AssignDesc_versions_2eproto();
friend void protobuf_ShutdownFile_versions_2eproto();
void InitAsDefaultInstance();
};
extern ::google::protobuf::internal::ExplicitlyConstructed<VersionDef> VersionDef_default_instance_;
// ===================================================================
// ===================================================================
#if !PROTOBUF_INLINE_NOT_IN_HEADERS
// VersionDef
// optional int32 producer = 1;
inline void VersionDef::clear_producer() {
producer_ = 0;
}
inline ::google::protobuf::int32 VersionDef::producer() const {
// @@protoc_insertion_point(field_get:tensorflow.VersionDef.producer)
return producer_;
}
inline void VersionDef::set_producer(::google::protobuf::int32 value) {
producer_ = value;
// @@protoc_insertion_point(field_set:tensorflow.VersionDef.producer)
}
// optional int32 min_consumer = 2;
inline void VersionDef::clear_min_consumer() {
min_consumer_ = 0;
}
inline ::google::protobuf::int32 VersionDef::min_consumer() const {
// @@protoc_insertion_point(field_get:tensorflow.VersionDef.min_consumer)
return min_consumer_;
}
inline void VersionDef::set_min_consumer(::google::protobuf::int32 value) {
min_consumer_ = value;
// @@protoc_insertion_point(field_set:tensorflow.VersionDef.min_consumer)
}
// repeated int32 bad_consumers = 3;
inline int VersionDef::bad_consumers_size() const {
return bad_consumers_.size();
}
inline void VersionDef::clear_bad_consumers() {
bad_consumers_.Clear();
}
inline ::google::protobuf::int32 VersionDef::bad_consumers(int index) const {
// @@protoc_insertion_point(field_get:tensorflow.VersionDef.bad_consumers)
return bad_consumers_.Get(index);
}
inline void VersionDef::set_bad_consumers(int index, ::google::protobuf::int32 value) {
bad_consumers_.Set(index, value);
// @@protoc_insertion_point(field_set:tensorflow.VersionDef.bad_consumers)
}
inline void VersionDef::add_bad_consumers(::google::protobuf::int32 value) {
bad_consumers_.Add(value);
// @@protoc_insertion_point(field_add:tensorflow.VersionDef.bad_consumers)
}
inline const ::google::protobuf::RepeatedField< ::google::protobuf::int32 >&
VersionDef::bad_consumers() const {
// @@protoc_insertion_point(field_list:tensorflow.VersionDef.bad_consumers)
return bad_consumers_;
}
inline ::google::protobuf::RepeatedField< ::google::protobuf::int32 >*
VersionDef::mutable_bad_consumers() {
// @@protoc_insertion_point(field_mutable_list:tensorflow.VersionDef.bad_consumers)
return &bad_consumers_;
}
inline const VersionDef* VersionDef::internal_default_instance() {
return &VersionDef_default_instance_.get();
}
#endif // !PROTOBUF_INLINE_NOT_IN_HEADERS
// @@protoc_insertion_point(namespace_scope)
} // namespace tensorflow
// @@protoc_insertion_point(global_scope)
#endif // PROTOBUF_versions_2eproto__INCLUDED
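A minimal usage sketch of the accessors above, including the compatibility rule the message encodes: a consumer must be at least min_consumer and must not appear in bad_consumers. The consumerAccepts() helper is an illustration only, not part of TensorFlow or OpenCV:

#include <algorithm>
#include "versions.pb.h"

bool consumerAccepts(const tensorflow::VersionDef& v, ::google::protobuf::int32 consumer)
{
    if (consumer < v.min_consumer())
        return false;
    const ::google::protobuf::RepeatedField< ::google::protobuf::int32 >& bad = v.bad_consumers();
    return std::find(bad.begin(), bad.end(), consumer) == bad.end();
}

int main()
{
    tensorflow::VersionDef v;
    v.set_producer(21);
    v.set_min_consumer(0);
    v.add_bad_consumers(13);
    return consumerAccepts(v, 13) ? 1 : 0;  // 13 is explicitly rejected -> returns 0
}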


@ -0,0 +1,106 @@
#include "perf_precomp.hpp"
#include <opencv2/dnn/shape_utils.hpp>
namespace cvtest
{
using std::tr1::tuple;
using std::tr1::get;
using std::tr1::make_tuple;
using std::make_pair;
using namespace perf;
using namespace testing;
using namespace cv;
using namespace cv::dnn;
enum {STRIDE_OFF = 1, STRIDE_ON = 2};
CV_ENUM(StrideSize, STRIDE_OFF, STRIDE_ON);
enum {GROUP_OFF = 1, GROUP_2 = 2};
CV_ENUM(GroupSize, GROUP_OFF, GROUP_2);
// Square size helper: SSZ(n) yields an n x n cv::Size
#define SSZ(n) cv::Size(n, n)
typedef std::pair<MatShape, int> InpShapeNumOut;
typedef tuple<Size, InpShapeNumOut, GroupSize, StrideSize> ConvParam; //kernel_size, inp shape, groups, stride
typedef TestBaseWithParam<ConvParam> ConvolutionPerfTest;
static inline MatShape blobShape(int count, int nplanes, int height, int width)
{
int data[] = {count, nplanes, height, width};
return MatShape(data, data+4);
}
PERF_TEST_P( ConvolutionPerfTest, perf, Combine(
Values(Size(1, 1), Size(3, 3), Size(5, 5), Size(11, 11)),
Values(make_pair(blobShape(1, 4, 224, 224), 64),
make_pair(blobShape(1, 64, 112, 122), 128),
make_pair(blobShape(1, 256, 28, 28), 512)),
GroupSize::all(),
StrideSize::all())
)
{
RNG rng(0);
ConvParam params = GetParam();
int ksz = get<0>(params).width;
MatShape inpShape = get<1>(params).first;
int outCn = get<1>(params).second;
int groups = get<2>(params);
int stride = (ksz >= 11) ? 4 : (int)get<3>(params);
int inpCn = inpShape[1];
int wgtSize[] = { outCn, inpCn/groups, ksz, ksz };
int biasSize[] = { outCn, 1, 1, 1 };
const int wtype = CV_32F;
Mat wgtBlob(4, wgtSize, wtype), biasBlob(4, biasSize, wtype);
Mat inpBlob(4, &inpShape[0], wtype);
rng.fill(biasBlob, RNG::UNIFORM, -1, +1);
rng.fill(wgtBlob, RNG::UNIFORM, -1, +1);
rng.fill(inpBlob, RNG::UNIFORM, -1, +1);
LayerParams lp;
lp.set("num_output", outCn);
lp.set("group", groups);
lp.set("stride", stride);
lp.set("kernel_size", ksz);
lp.blobs.reserve(2);
lp.blobs.push_back(wgtBlob);
lp.blobs.push_back(biasBlob);
std::vector<Mat*> inpBlobs(1, &inpBlob);
std::vector<Mat> outBlobs, internalBlobs;
cv::setNumThreads(cv::getNumberOfCPUs());
Ptr<Layer> layer = cv::dnn::LayerFactory::createLayerInstance("Convolution", lp);
std::vector<MatShape> inputShapes(1, shape(inpBlob)), outShapes, internals;
layer->getMemoryShapes(inputShapes, 0, outShapes, internals);
for (size_t i = 0; i < outShapes.size(); i++)
{
    outBlobs.push_back(Mat(outShapes[i], CV_32F));
}
for (size_t i = 0; i < internals.size(); i++)
{
    internalBlobs.push_back(Mat());
    if (total(internals[i]))
        internalBlobs.back().create(internals[i], CV_32F);
}
layer->finalize(inpBlobs, outBlobs);
Mat inpBlob2D = inpBlob.reshape(1, outCn);
Mat wgtBlob2D = wgtBlob.reshape(1, outCn*(inpCn/groups));
Mat outBlob2D = outBlobs[0].reshape(1, outBlobs[0].size[0]);
declare.in(inpBlob2D, wgtBlob2D, WARMUP_RNG).out(outBlob2D).tbb_threads(cv::getNumThreads());
TEST_CYCLE_N(10)
{
layer->forward(inpBlobs, outBlobs, internalBlobs);
}
SANITY_CHECK_NOTHING();
}
}
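For reference, the output shapes requested from getMemoryShapes() follow the standard convolution arithmetic. A hedged sketch of the spatial-size computation (the helper name is hypothetical; the layer above uses no padding):

static int convOutSize(int in, int ksz, int stride, int pad = 0)
{
    return (in + 2 * pad - ksz) / stride + 1;  // integer division == floor
}
// e.g. the 224x224 input with an 11x11 kernel (stride forced to 4 above):
// (224 - 11) / 4 + 1 = 54 rows and columns per output channel.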


@ -0,0 +1,172 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2017, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
namespace cvtest
{
#ifdef HAVE_HALIDE
using namespace cv;
using namespace dnn;
static void loadNet(std::string weights, std::string proto, std::string scheduler,
int inWidth, int inHeight, const std::string& outputLayer,
const std::string& framework, int targetId, Net* net)
{
Mat input(inHeight, inWidth, CV_32FC3);
randu(input, 0.0f, 1.0f);
weights = findDataFile(weights, false);
if (!proto.empty())
proto = findDataFile(proto, false);
if (!scheduler.empty())
scheduler = findDataFile(scheduler, false);
if (framework == "caffe")
{
*net = cv::dnn::readNetFromCaffe(proto, weights);
}
else if (framework == "torch")
{
*net = cv::dnn::readNetFromTorch(weights);
}
else if (framework == "tensorflow")
{
*net = cv::dnn::readNetFromTensorflow(weights);
}
else
CV_Error(Error::StsNotImplemented, "Unknown framework " + framework);
net->setInput(blobFromImage(input, 1.0, Size(), Scalar(), false));
net->setPreferableBackend(DNN_BACKEND_HALIDE);
net->setPreferableTarget(targetId);
net->setHalideScheduler(scheduler);
net->forward(outputLayer);
}
////////////////////////////////////////////////////////////////////////////////
// CPU target
////////////////////////////////////////////////////////////////////////////////
PERF_TEST(GoogLeNet, HalidePerfTest)
{
Net net;
loadNet("dnn/bvlc_googlenet.caffemodel", "dnn/bvlc_googlenet.prototxt",
"", 227, 227, "prob", "caffe", DNN_TARGET_CPU, &net);
TEST_CYCLE() net.forward();
SANITY_CHECK_NOTHING();
}
PERF_TEST(AlexNet, HalidePerfTest)
{
Net net;
loadNet("dnn/bvlc_alexnet.caffemodel", "dnn/bvlc_alexnet.prototxt",
"dnn/halide_scheduler_alexnet.yml", 227, 227, "prob", "caffe",
DNN_TARGET_CPU, &net);
TEST_CYCLE() net.forward();
SANITY_CHECK_NOTHING();
}
PERF_TEST(ResNet50, HalidePerfTest)
{
Net net;
loadNet("dnn/ResNet-50-model.caffemodel", "dnn/ResNet-50-deploy.prototxt",
"dnn/halide_scheduler_resnet_50.yml", 224, 224, "prob", "caffe",
DNN_TARGET_CPU, &net);
TEST_CYCLE() net.forward();
SANITY_CHECK_NOTHING();
}
PERF_TEST(SqueezeNet_v1_1, HalidePerfTest)
{
Net net;
loadNet("dnn/squeezenet_v1_1.caffemodel", "dnn/squeezenet_v1_1.prototxt",
"dnn/halide_scheduler_squeezenet_v1_1.yml", 227, 227, "prob",
"caffe", DNN_TARGET_CPU, &net);
TEST_CYCLE() net.forward();
SANITY_CHECK_NOTHING();
}
PERF_TEST(Inception_5h, HalidePerfTest)
{
Net net;
loadNet("dnn/tensorflow_inception_graph.pb", "",
"dnn/halide_scheduler_inception_5h.yml",
224, 224, "softmax2", "tensorflow", DNN_TARGET_CPU, &net);
TEST_CYCLE() net.forward("softmax2");
SANITY_CHECK_NOTHING();
}
PERF_TEST(ENet, HalidePerfTest)
{
Net net;
loadNet("dnn/Enet-model-best.net", "", "dnn/halide_scheduler_enet.yml",
512, 256, "l367_Deconvolution", "torch", DNN_TARGET_CPU, &net);
TEST_CYCLE() net.forward();
SANITY_CHECK_NOTHING();
}
////////////////////////////////////////////////////////////////////////////////
// OpenCL target
////////////////////////////////////////////////////////////////////////////////
PERF_TEST(GoogLeNet_opencl, HalidePerfTest)
{
Net net;
loadNet("dnn/bvlc_googlenet.caffemodel", "dnn/bvlc_googlenet.prototxt",
"", 227, 227, "prob", "caffe", DNN_TARGET_OPENCL, &net);
TEST_CYCLE() net.forward();
SANITY_CHECK_NOTHING();
}
PERF_TEST(AlexNet_opencl, HalidePerfTest)
{
Net net;
loadNet("dnn/bvlc_alexnet.caffemodel", "dnn/bvlc_alexnet.prototxt",
"dnn/halide_scheduler_opencl_alexnet.yml", 227, 227, "prob", "caffe",
DNN_TARGET_OPENCL, &net);
TEST_CYCLE() net.forward();
SANITY_CHECK_NOTHING();
}
PERF_TEST(ResNet50_opencl, HalidePerfTest)
{
Net net;
loadNet("dnn/ResNet-50-model.caffemodel", "dnn/ResNet-50-deploy.prototxt",
"dnn/halide_scheduler_opencl_resnet_50.yml", 224, 224, "prob", "caffe",
DNN_TARGET_OPENCL, &net);
TEST_CYCLE() net.forward();
SANITY_CHECK_NOTHING();
}
PERF_TEST(SqueezeNet_v1_1_opencl, HalidePerfTest)
{
Net net;
loadNet("dnn/squeezenet_v1_1.caffemodel", "dnn/squeezenet_v1_1.prototxt",
"dnn/halide_scheduler_opencl_squeezenet_v1_1.yml", 227, 227, "prob",
"caffe", DNN_TARGET_OPENCL, &net);
TEST_CYCLE() net.forward();
SANITY_CHECK_NOTHING();
}
PERF_TEST(Inception_5h_opencl, HalidePerfTest)
{
Net net;
loadNet("dnn/tensorflow_inception_graph.pb", "",
"dnn/halide_scheduler_opencl_inception_5h.yml",
224, 224, "softmax2", "tensorflow", DNN_TARGET_OPENCL, &net);
TEST_CYCLE() net.forward("softmax2");
SANITY_CHECK_NOTHING();
}
PERF_TEST(ENet_opencl, HalidePerfTest)
{
Net net;
loadNet("dnn/Enet-model-best.net", "", "dnn/halide_scheduler_opencl_enet.yml",
512, 256, "l367_Deconvolution", "torch", DNN_TARGET_OPENCL, &net);
TEST_CYCLE() net.forward();
SANITY_CHECK_NOTHING();
}
#endif // HAVE_HALIDE
} // namespace cvtest
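Outside the perf framework, a single forward pass can be timed directly with cv::TickMeter. A minimal sketch (timeForward is a hypothetical helper), assuming the Net was prepared by loadNet() as above:

#include <iostream>
#include <opencv2/core/utility.hpp>  // cv::TickMeter
#include <opencv2/dnn.hpp>

void timeForward(cv::dnn::Net& net)
{
    cv::TickMeter tm;
    tm.start();
    net.forward();  // runs the pipeline compiled for the preferred backend/target
    tm.stop();
    std::cout << "forward: " << tm.getTimeMilli() << " ms" << std::endl;
}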


@ -0,0 +1,12 @@
#include "perf_precomp.hpp"
static const char* extraTestDataPath =
#ifdef WINRT
NULL;
#else
getenv("OPENCV_DNN_TEST_DATA_PATH");
#endif
CV_PERF_TEST_MAIN(dnn,
extraTestDataPath ? (void)cvtest::addDataSearchPath(extraTestDataPath) : (void)0
)


@ -0,0 +1,17 @@
#ifdef __GNUC__
# pragma GCC diagnostic ignored "-Wmissing-declarations"
# if defined __clang__ || defined __APPLE__
# pragma GCC diagnostic ignored "-Wmissing-prototypes"
# pragma GCC diagnostic ignored "-Wextra"
# endif
#endif
#ifndef __OPENCV_PERF_PRECOMP_HPP__
#define __OPENCV_PERF_PRECOMP_HPP__
#include <opencv2/ts.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/dnn.hpp>
#endif

modules/dnn/samples/.gitignore

@ -0,0 +1 @@
*.caffemodel

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -0,0 +1,143 @@
/**M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include <opencv2/dnn.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
using namespace cv;
using namespace cv::dnn;
#include <fstream>
#include <iostream>
#include <cstdlib>
using namespace std;
/* Find the best class for the blob (i.e. the class with maximal probability) */
void getMaxClass(const Mat &probBlob, int *classId, double *classProb)
{
Mat probMat = probBlob.reshape(1, 1); //reshape the blob to 1x1000 matrix
Point classNumber;
minMaxLoc(probMat, NULL, classProb, NULL, &classNumber);
*classId = classNumber.x;
}
std::vector<String> readClassNames(const char *filename = "synset_words.txt")
{
std::vector<String> classNames;
std::ifstream fp(filename);
if (!fp.is_open())
{
std::cerr << "File with classes labels not found: " << filename << std::endl;
exit(-1);
}
std::string name;
while (std::getline(fp, name))
{
    if (name.length())
        classNames.push_back( name.substr(name.find(' ') + 1) );
}
fp.close();
return classNames;
}
int main(int argc, char **argv)
{
cv::dnn::initModule(); //Required if OpenCV is built as static libs
String modelTxt = "bvlc_googlenet.prototxt";
String modelBin = "bvlc_googlenet.caffemodel";
String imageFile = (argc > 1) ? argv[1] : "space_shuttle.jpg";
//! [Read and initialize network]
Net net = dnn::readNetFromCaffe(modelTxt, modelBin);
//! [Read and initialize network]
//! [Check that network was read successfully]
if (net.empty())
{
std::cerr << "Can't load network by using the following files: " << std::endl;
std::cerr << "prototxt: " << modelTxt << std::endl;
std::cerr << "caffemodel: " << modelBin << std::endl;
std::cerr << "bvlc_googlenet.caffemodel can be downloaded here:" << std::endl;
std::cerr << "http://dl.caffe.berkeleyvision.org/bvlc_googlenet.caffemodel" << std::endl;
exit(-1);
}
//! [Check that network was read successfully]
//! [Prepare blob]
Mat img = imread(imageFile);
if (img.empty())
{
std::cerr << "Can't read image from the file: " << imageFile << std::endl;
exit(-1);
}
// GoogLeNet accepts only 224x224 RGB images
Mat inputBlob = blobFromImage(img, 1, Size(224, 224),
                              Scalar(104, 117, 123)); // convert Mat to a 4D batch blob
//! [Prepare blob]
//! [Set input blob]
net.setInput(inputBlob, "data"); //set the network input
//! [Set input blob]
//! [Make forward pass]
Mat prob = net.forward("prob"); //compute output
//! [Make forward pass]
//! [Gather output]
int classId;
double classProb;
getMaxClass(prob, &classId, &classProb);//find the best class
//! [Gather output]
//! [Print results]
std::vector<String> classNames = readClassNames();
std::cout << "Best class: #" << classId << " '" << classNames.at(classId) << "'" << std::endl;
std::cout << "Probability: " << classProb * 100 << "%" << std::endl;
//! [Print results]
return 0;
} //main
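The same reshape trick extends to a top-N report. A hedged sketch (getTop5 is a hypothetical addition to this sample; cv::sortIdx returns indices ordered by probability):

void getTop5(const Mat &probBlob, std::vector<int> *classIds)
{
    Mat probMat = probBlob.reshape(1, 1);  // 1x1000 row of probabilities
    Mat order;
    cv::sortIdx(probMat, order, SORT_EVERY_ROW | SORT_DESCENDING);
    for (int i = 0; i < 5 && i < order.cols; i++)
        classIds->push_back(order.at<int>(0, i));  // indices of the 5 largest
}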


@ -0,0 +1,20 @@
Unlabeled 0 0 0
Road 128 64 128
Sidewalk 244 35 232
Building 70 70 70
Wall 102 102 156
Fence 190 153 153
Pole 153 153 153
TrafficLight 250 170 30
TrafficSign 220 220 0
Vegetation 107 142 35
Terrain 152 251 152
Sky 70 130 180
Person 220 20 60
Rider 255 0 0
Car 0 0 142
Truck 0 0 70
Bus 0 60 100
Train 0 80 100
Motorcycle 0 0 230
Bicycle 119 11 32


@ -0,0 +1,502 @@
#
# This prototxt is based on voc-fcn32s/val.prototxt file from
# https://github.com/shelhamer/fcn.berkeleyvision.org, which is distributed under
# Caffe (BSD) license:
# http://caffe.berkeleyvision.org/model_zoo.html#bvlc-model-license
#
name: "voc-fcn32s"
input: "data"
input_dim: 1
input_dim: 3
input_dim: 500
input_dim: 500
layer {
name: "conv1_1"
type: "Convolution"
bottom: "data"
top: "conv1_1"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 64
pad: 100
kernel_size: 3
stride: 1
}
}
layer {
name: "relu1_1"
type: "ReLU"
bottom: "conv1_1"
top: "conv1_1"
}
layer {
name: "conv1_2"
type: "Convolution"
bottom: "conv1_1"
top: "conv1_2"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 64
pad: 1
kernel_size: 3
stride: 1
}
}
layer {
name: "relu1_2"
type: "ReLU"
bottom: "conv1_2"
top: "conv1_2"
}
layer {
name: "pool1"
type: "Pooling"
bottom: "conv1_2"
top: "pool1"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
layer {
name: "conv2_1"
type: "Convolution"
bottom: "pool1"
top: "conv2_1"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 128
pad: 1
kernel_size: 3
stride: 1
}
}
layer {
name: "relu2_1"
type: "ReLU"
bottom: "conv2_1"
top: "conv2_1"
}
layer {
name: "conv2_2"
type: "Convolution"
bottom: "conv2_1"
top: "conv2_2"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 128
pad: 1
kernel_size: 3
stride: 1
}
}
layer {
name: "relu2_2"
type: "ReLU"
bottom: "conv2_2"
top: "conv2_2"
}
layer {
name: "pool2"
type: "Pooling"
bottom: "conv2_2"
top: "pool2"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
layer {
name: "conv3_1"
type: "Convolution"
bottom: "pool2"
top: "conv3_1"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 256
pad: 1
kernel_size: 3
stride: 1
}
}
layer {
name: "relu3_1"
type: "ReLU"
bottom: "conv3_1"
top: "conv3_1"
}
layer {
name: "conv3_2"
type: "Convolution"
bottom: "conv3_1"
top: "conv3_2"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 256
pad: 1
kernel_size: 3
stride: 1
}
}
layer {
name: "relu3_2"
type: "ReLU"
bottom: "conv3_2"
top: "conv3_2"
}
layer {
name: "conv3_3"
type: "Convolution"
bottom: "conv3_2"
top: "conv3_3"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 256
pad: 1
kernel_size: 3
stride: 1
}
}
layer {
name: "relu3_3"
type: "ReLU"
bottom: "conv3_3"
top: "conv3_3"
}
layer {
name: "pool3"
type: "Pooling"
bottom: "conv3_3"
top: "pool3"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
layer {
name: "conv4_1"
type: "Convolution"
bottom: "pool3"
top: "conv4_1"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
stride: 1
}
}
layer {
name: "relu4_1"
type: "ReLU"
bottom: "conv4_1"
top: "conv4_1"
}
layer {
name: "conv4_2"
type: "Convolution"
bottom: "conv4_1"
top: "conv4_2"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
stride: 1
}
}
layer {
name: "relu4_2"
type: "ReLU"
bottom: "conv4_2"
top: "conv4_2"
}
layer {
name: "conv4_3"
type: "Convolution"
bottom: "conv4_2"
top: "conv4_3"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
stride: 1
}
}
layer {
name: "relu4_3"
type: "ReLU"
bottom: "conv4_3"
top: "conv4_3"
}
layer {
name: "pool4"
type: "Pooling"
bottom: "conv4_3"
top: "pool4"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
layer {
name: "conv5_1"
type: "Convolution"
bottom: "pool4"
top: "conv5_1"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
stride: 1
}
}
layer {
name: "relu5_1"
type: "ReLU"
bottom: "conv5_1"
top: "conv5_1"
}
layer {
name: "conv5_2"
type: "Convolution"
bottom: "conv5_1"
top: "conv5_2"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
stride: 1
}
}
layer {
name: "relu5_2"
type: "ReLU"
bottom: "conv5_2"
top: "conv5_2"
}
layer {
name: "conv5_3"
type: "Convolution"
bottom: "conv5_2"
top: "conv5_3"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
stride: 1
}
}
layer {
name: "relu5_3"
type: "ReLU"
bottom: "conv5_3"
top: "conv5_3"
}
layer {
name: "pool5"
type: "Pooling"
bottom: "conv5_3"
top: "pool5"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
layer {
name: "fc6"
type: "Convolution"
bottom: "pool5"
top: "fc6"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 4096
pad: 0
kernel_size: 7
stride: 1
}
}
layer {
name: "relu6"
type: "ReLU"
bottom: "fc6"
top: "fc6"
}
layer {
name: "fc7"
type: "Convolution"
bottom: "fc6"
top: "fc7"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 4096
pad: 0
kernel_size: 1
stride: 1
}
}
layer {
name: "relu7"
type: "ReLU"
bottom: "fc7"
top: "fc7"
}
layer {
name: "score_fr"
type: "Convolution"
bottom: "fc7"
top: "score_fr"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 21
pad: 0
kernel_size: 1
}
}
layer {
name: "upscore"
type: "Deconvolution"
bottom: "score_fr"
top: "upscore"
param {
lr_mult: 0
}
convolution_param {
num_output: 21
bias_term: false
kernel_size: 64
stride: 32
}
}
layer {
name: "score"
type: "Crop"
bottom: "upscore"
bottom: "data"
top: "score"
crop_param {
axis: 2
offset: 19
}
}
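The Deconvolution/Crop pair above encodes the alignment between the 500x500 input (padded by 100 in conv1_1) and the 32x-upsampled score map. The crop offset of 19 can be rederived by composing each layer's coordinate transform x_in = s*x_out + (k-1)/2 - p from the data blob down to score_fr and then inverting the final deconvolution; a standalone back-of-the-envelope sketch (plain Python, layer parameters transcribed by hand from the prototxt above):

# Each conv/pool layer maps its output coordinate x to input coordinate
# s*x + (k - 1)/2 - p, for kernel k, stride s, padding p.
layers = [
    (3, 1, 100),                                 # conv1_1 (note pad: 100)
    (3, 1, 1), (2, 2, 0),                        # conv1_2, pool1
    (3, 1, 1), (3, 1, 1), (2, 2, 0),             # conv2_*, pool2
    (3, 1, 1), (3, 1, 1), (3, 1, 1), (2, 2, 0),  # conv3_*, pool3
    (3, 1, 1), (3, 1, 1), (3, 1, 1), (2, 2, 0),  # conv4_*, pool4
    (3, 1, 1), (3, 1, 1), (3, 1, 1), (2, 2, 0),  # conv5_*, pool5
    (7, 1, 0), (1, 1, 0), (1, 1, 0),             # fc6, fc7, score_fr
]
scale, offset = 1.0, 0.0  # composite map: x_data = scale * x + offset
for k, s, p in layers:
    scale, offset = scale * s, scale * ((k - 1) / 2.0 - p) + offset
print(scale, offset)      # 32.0 12.5 -> score_fr pixel u maps to 32*u + 12.5

# The upscore deconvolution (kernel 64, stride 32) inverts that transform:
k, s, p = 64, 32, 0
print(scale / s, offset - scale * ((k - 1) / 2.0 - p) / s)
# 1.0 -19.0 -> upscore pixel v sits at data coordinate v - 19,
# hence "offset: 19" in the Crop layer.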


@ -0,0 +1,612 @@
#
# This prototxt is based on voc-fcn8s/val.prototxt file from
# https://github.com/shelhamer/fcn.berkeleyvision.org, which is distributed under
# Caffe (BSD) license:
# http://caffe.berkeleyvision.org/model_zoo.html#bvlc-model-license
#
name: "voc-fcn8s"
input: "data"
input_dim: 1
input_dim: 3
input_dim: 500
input_dim: 500
layer {
name: "conv1_1"
type: "Convolution"
bottom: "data"
top: "conv1_1"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 64
pad: 100
kernel_size: 3
stride: 1
}
}
layer {
name: "relu1_1"
type: "ReLU"
bottom: "conv1_1"
top: "conv1_1"
}
layer {
name: "conv1_2"
type: "Convolution"
bottom: "conv1_1"
top: "conv1_2"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 64
pad: 1
kernel_size: 3
stride: 1
}
}
layer {
name: "relu1_2"
type: "ReLU"
bottom: "conv1_2"
top: "conv1_2"
}
layer {
name: "pool1"
type: "Pooling"
bottom: "conv1_2"
top: "pool1"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
layer {
name: "conv2_1"
type: "Convolution"
bottom: "pool1"
top: "conv2_1"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 128
pad: 1
kernel_size: 3
stride: 1
}
}
layer {
name: "relu2_1"
type: "ReLU"
bottom: "conv2_1"
top: "conv2_1"
}
layer {
name: "conv2_2"
type: "Convolution"
bottom: "conv2_1"
top: "conv2_2"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 128
pad: 1
kernel_size: 3
stride: 1
}
}
layer {
name: "relu2_2"
type: "ReLU"
bottom: "conv2_2"
top: "conv2_2"
}
layer {
name: "pool2"
type: "Pooling"
bottom: "conv2_2"
top: "pool2"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
layer {
name: "conv3_1"
type: "Convolution"
bottom: "pool2"
top: "conv3_1"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 256
pad: 1
kernel_size: 3
stride: 1
}
}
layer {
name: "relu3_1"
type: "ReLU"
bottom: "conv3_1"
top: "conv3_1"
}
layer {
name: "conv3_2"
type: "Convolution"
bottom: "conv3_1"
top: "conv3_2"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 256
pad: 1
kernel_size: 3
stride: 1
}
}
layer {
name: "relu3_2"
type: "ReLU"
bottom: "conv3_2"
top: "conv3_2"
}
layer {
name: "conv3_3"
type: "Convolution"
bottom: "conv3_2"
top: "conv3_3"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 256
pad: 1
kernel_size: 3
stride: 1
}
}
layer {
name: "relu3_3"
type: "ReLU"
bottom: "conv3_3"
top: "conv3_3"
}
layer {
name: "pool3"
type: "Pooling"
bottom: "conv3_3"
top: "pool3"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
layer {
name: "conv4_1"
type: "Convolution"
bottom: "pool3"
top: "conv4_1"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
stride: 1
}
}
layer {
name: "relu4_1"
type: "ReLU"
bottom: "conv4_1"
top: "conv4_1"
}
layer {
name: "conv4_2"
type: "Convolution"
bottom: "conv4_1"
top: "conv4_2"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
stride: 1
}
}
layer {
name: "relu4_2"
type: "ReLU"
bottom: "conv4_2"
top: "conv4_2"
}
layer {
name: "conv4_3"
type: "Convolution"
bottom: "conv4_2"
top: "conv4_3"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
stride: 1
}
}
layer {
name: "relu4_3"
type: "ReLU"
bottom: "conv4_3"
top: "conv4_3"
}
layer {
name: "pool4"
type: "Pooling"
bottom: "conv4_3"
top: "pool4"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
layer {
name: "conv5_1"
type: "Convolution"
bottom: "pool4"
top: "conv5_1"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
stride: 1
}
}
layer {
name: "relu5_1"
type: "ReLU"
bottom: "conv5_1"
top: "conv5_1"
}
layer {
name: "conv5_2"
type: "Convolution"
bottom: "conv5_1"
top: "conv5_2"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
stride: 1
}
}
layer {
name: "relu5_2"
type: "ReLU"
bottom: "conv5_2"
top: "conv5_2"
}
layer {
name: "conv5_3"
type: "Convolution"
bottom: "conv5_2"
top: "conv5_3"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
stride: 1
}
}
layer {
name: "relu5_3"
type: "ReLU"
bottom: "conv5_3"
top: "conv5_3"
}
layer {
name: "pool5"
type: "Pooling"
bottom: "conv5_3"
top: "pool5"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
layer {
name: "fc6"
type: "Convolution"
bottom: "pool5"
top: "fc6"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 4096
pad: 0
kernel_size: 7
stride: 1
}
}
layer {
name: "relu6"
type: "ReLU"
bottom: "fc6"
top: "fc6"
}
layer {
name: "fc7"
type: "Convolution"
bottom: "fc6"
top: "fc7"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 4096
pad: 0
kernel_size: 1
stride: 1
}
}
layer {
name: "relu7"
type: "ReLU"
bottom: "fc7"
top: "fc7"
}
layer {
name: "score_fr"
type: "Convolution"
bottom: "fc7"
top: "score_fr"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 21
pad: 0
kernel_size: 1
}
}
layer {
name: "upscore2"
type: "Deconvolution"
bottom: "score_fr"
top: "upscore2"
param {
lr_mult: 0
}
convolution_param {
num_output: 21
bias_term: false
kernel_size: 4
stride: 2
}
}
layer {
name: "score_pool4"
type: "Convolution"
bottom: "pool4"
top: "score_pool4"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 21
pad: 0
kernel_size: 1
}
}
layer {
name: "score_pool4c"
type: "Crop"
bottom: "score_pool4"
bottom: "upscore2"
top: "score_pool4c"
crop_param {
axis: 2
offset: 5
}
}
layer {
name: "fuse_pool4"
type: "Eltwise"
bottom: "upscore2"
bottom: "score_pool4c"
top: "fuse_pool4"
eltwise_param {
operation: SUM
}
}
layer {
name: "upscore_pool4"
type: "Deconvolution"
bottom: "fuse_pool4"
top: "upscore_pool4"
param {
lr_mult: 0
}
convolution_param {
num_output: 21
bias_term: false
kernel_size: 4
stride: 2
}
}
layer {
name: "score_pool3"
type: "Convolution"
bottom: "pool3"
top: "score_pool3"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 21
pad: 0
kernel_size: 1
}
}
layer {
name: "score_pool3c"
type: "Crop"
bottom: "score_pool3"
bottom: "upscore_pool4"
top: "score_pool3c"
crop_param {
axis: 2
offset: 9
}
}
layer {
name: "fuse_pool3"
type: "Eltwise"
bottom: "upscore_pool4"
bottom: "score_pool3c"
top: "fuse_pool3"
eltwise_param {
operation: SUM
}
}
layer {
name: "upscore8"
type: "Deconvolution"
bottom: "fuse_pool3"
top: "upscore8"
param {
lr_mult: 0
}
convolution_param {
num_output: 21
bias_term: false
kernel_size: 16
stride: 8
}
}
layer {
name: "score"
type: "Crop"
bottom: "upscore8"
bottom: "data"
top: "score"
crop_param {
axis: 2
offset: 31
}
}
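The same bookkeeping produces the three crop offsets of the FCN-8s head (5, 9 and 31). Stopping the loop from the previous sketch at pool4, pool3 and score_fr gives the composite maps 16x - 91.5, 8x - 95.5 and 32x + 12.5 for those blobs; a short continuation reusing these constants:

def invert_deconv(scale, offset, k, s, p=0):
    # Push the composite map (scale, offset) of a deconvolution's input
    # through the deconvolution itself (coarse -> fine coordinates).
    return scale / s, offset - scale * ((k - 1) / 2.0 - p) / s

up2 = invert_deconv(32, 12.5, 4, 2)         # upscore2:      x -> 16x - 11.5
print((up2[1] - (-91.5)) / 16)              # 5.0  -> score_pool4c "offset: 5"
up4 = invert_deconv(up2[0], up2[1], 4, 2)   # upscore_pool4: x -> 8x - 23.5
print((up4[1] - (-95.5)) / 8)               # 9.0  -> score_pool3c "offset: 9"
up8 = invert_deconv(up4[0], up4[1], 16, 8)  # upscore8:      x -> x - 31
print(-up8[1])                              # 31.0 -> final "offset: 31"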


@ -0,0 +1,154 @@
#include <opencv2/dnn.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
using namespace cv;
using namespace cv::dnn;
#include <fstream>
#include <iostream>
#include <sstream>
#include <cstdlib>
#include <cfloat>
using namespace std;
static const string fcnType = "fcn8s";
static vector<cv::Vec3b> readColors(const string &filename = "pascal-classes.txt")
{
vector<cv::Vec3b> colors;
ifstream fp(filename.c_str());
if (!fp.is_open())
{
cerr << "File with colors not found: " << filename << endl;
exit(-1);
}
string line;
while (!fp.eof())
{
getline(fp, line);
if (line.length())
{
stringstream ss(line);
string name; ss >> name;
int temp;
cv::Vec3b color;
ss >> temp; color[0] = temp;
ss >> temp; color[1] = temp;
ss >> temp; color[2] = temp;
colors.push_back(color);
}
}
fp.close();
return colors;
}
static void colorizeSegmentation(const Mat &score, const vector<cv::Vec3b> &colors, cv::Mat &segm)
{
const int rows = score.size[2];
const int cols = score.size[3];
const int chns = score.size[1];
cv::Mat maxCl(rows, cols, CV_8UC1);
cv::Mat maxVal(rows, cols, CV_32FC1, cv::Scalar(-FLT_MAX)); //init low: raw scores may be negative
for (int ch = 0; ch < chns; ch++)
{
for (int row = 0; row < rows; row++)
{
const float *ptrScore = score.ptr<float>(0, ch, row);
uchar *ptrMaxCl = maxCl.ptr<uchar>(row);
float *ptrMaxVal = maxVal.ptr<float>(row);
for (int col = 0; col < cols; col++)
{
if (ptrScore[col] > ptrMaxVal[col])
{
ptrMaxVal[col] = ptrScore[col];
ptrMaxCl[col] = ch;
}
}
}
}
segm.create(rows, cols, CV_8UC3);
for (int row = 0; row < rows; row++)
{
const uchar *ptrMaxCl = maxCl.ptr<uchar>(row);
cv::Vec3b *ptrSegm = segm.ptr<cv::Vec3b>(row);
for (int col = 0; col < cols; col++)
{
ptrSegm[col] = colors[ptrMaxCl[col]];
}
}
}
int main(int argc, char **argv)
{
cv::dnn::initModule(); //Required if OpenCV is built as static libs
String modelTxt = fcnType + "-heavy-pascal.prototxt";
String modelBin = fcnType + "-heavy-pascal.caffemodel";
String imageFile = (argc > 1) ? argv[1] : "rgb.jpg";
vector<cv::Vec3b> colors = readColors();
//! [Create the importer of Caffe model]
Ptr<dnn::Importer> importer;
try //Try to import the Caffe FCN model
{
importer = dnn::createCaffeImporter(modelTxt, modelBin);
}
catch (const cv::Exception &err) //Importer can throw errors, we will catch them
{
cerr << err.msg << endl;
}
//! [Create the importer of Caffe model]
if (!importer)
{
cerr << "Can't load network by using the following files: " << endl;
cerr << "prototxt: " << modelTxt << endl;
cerr << "caffemodel: " << modelBin << endl;
cerr << fcnType << "-heavy-pascal.caffemodel can be downloaded here:" << endl;
cerr << "http://dl.caffe.berkeleyvision.org/" << fcnType << "-heavy-pascal.caffemodel" << endl;
exit(-1);
}
//! [Initialize network]
dnn::Net net;
importer->populateNet(net);
importer.release(); //We don't need importer anymore
//! [Initialize network]
//! [Prepare blob]
Mat img = imread(imageFile);
if (img.empty())
{
cerr << "Can't read image from the file: " << imageFile << endl;
exit(-1);
}
resize(img, img, Size(500, 500)); //FCN accepts 500x500 RGB-images
Mat inputBlob = blobFromImage(img); //Convert Mat to batch of images
//! [Prepare blob]
//! [Set input blob]
net.setInput(inputBlob, "data"); //set the network input
//! [Set input blob]
//! [Make forward pass]
double t = (double)cv::getTickCount();
Mat score = net.forward("score"); //compute output
t = (double)cv::getTickCount() - t;
printf("processing time: %.1fms\n", t*1000./getTickFrequency());
//! [Make forward pass]
Mat colorize;
colorizeSegmentation(score, colors, colorize);
Mat show;
addWeighted(img, 0.4, colorize, 0.6, 0.0, show);
imshow("show", show);
waitKey(0);
return 0;
} //main
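For reference, the explicit per-pixel argmax done by colorizeSegmentation above collapses to two numpy operations; a hypothetical Python counterpart (assumes the score blob keeps the sample's 1xCxHxW layout, and is not part of the sample set):

import numpy as np

def colorize_segmentation(score, colors):
    # score: float array of shape (1, C, H, W); colors: list of C BGR triples
    class_ids = score[0].argmax(axis=0)              # (H, W) best class per pixel
    return np.asarray(colors, np.uint8)[class_ids]   # (H, W, 3) color image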


@ -0,0 +1,34 @@
from __future__ import print_function
import numpy as np
import cv2
from cv2 import dnn
import timeit
def prepare_image(img):
img = cv2.resize(img, (224, 224))
#convert interleaved image (RGBRGB...) to planar (RRR...GGG...BBB...)
blob = np.moveaxis(img, 2, 0)
blob = np.reshape(blob.astype(np.float32), (-1, 3, 224, 224))
return blob
def timeit_forward(net):
print("OpenCL:", cv2.ocl.useOpenCL())
print("Runtime:", timeit.timeit(lambda: net.forward(), number=10))
def get_class_list():
with open('synset_words.txt', 'rt') as f:
return [x[x.find(" ") + 1:].rstrip() for x in f]  #strip trailing newline from each name
blob = prepare_image(cv2.imread('space_shuttle.jpg'))
print("Input:", blob.shape, blob.dtype)
cv2.ocl.setUseOpenCL(True) #Set to False to disable OpenCL
net = dnn.readNetFromCaffe('bvlc_googlenet.prototxt', 'bvlc_googlenet.caffemodel')
net.setBlob(".data", blob)
net.forward()
#timeit_forward(net) #Uncomment to check performance
prob = net.getBlob("prob")
print("Output:", prob.shape, prob.dtype)
classes = get_class_list()
print("Best match", classes[prob.argmax()])


@ -0,0 +1,21 @@
background 0 0 0
aeroplane 128 0 0
bicycle 0 128 0
bird 128 128 0
boat 0 0 128
bottle 128 0 128
bus 0 128 128
car 128 128 128
cat 64 0 0
chair 192 0 0
cow 64 128 0
diningtable 192 128 0
dog 64 0 128
horse 192 0 128
motorbike 64 128 128
person 192 128 128
pottedplant 0 64 0
sheep 128 64 0
sofa 0 192 0
train 128 192 0
tvmonitor 0 64 128

BIN modules/dnn/samples/rgb.jpg Normal file (binary, 46 KiB; not shown)

BIN (second sample image, 27 KiB; not shown)


@ -0,0 +1,120 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2017, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
// Sample of using Halide backend in OpenCV deep learning module.
// Based on dnn/samples/caffe_googlenet.cpp.
#include <opencv2/dnn.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
using namespace cv;
using namespace cv::dnn;
#include <fstream>
#include <iostream>
#include <cstdlib>
/* Find best class for the blob (i.e. the class with maximal probability) */
void getMaxClass(const Mat &probBlob, int *classId, double *classProb)
{
Mat probMat = probBlob.reshape(1, 1); //reshape the blob to 1x1000 matrix
Point classNumber;
minMaxLoc(probMat, NULL, classProb, NULL, &classNumber);
*classId = classNumber.x;
}
std::vector<std::string> readClassNames(const char *filename = "synset_words.txt")
{
std::vector<std::string> classNames;
std::ifstream fp(filename);
if (!fp.is_open())
{
std::cerr << "File with classes labels not found: " << filename << std::endl;
exit(-1);
}
std::string name;
while (!fp.eof())
{
std::getline(fp, name);
if (name.length())
classNames.push_back( name.substr(name.find(' ')+1) );
}
fp.close();
return classNames;
}
int main(int argc, char **argv)
{
initModule(); // Required if OpenCV is built as static libs.
std::string modelTxt = "train_val.prototxt";
std::string modelBin = "squeezenet_v1.1.caffemodel";
std::string imageFile = (argc > 1) ? argv[1] : "space_shuttle.jpg";
//! [Read and initialize network]
Net net = dnn::readNetFromCaffe(modelTxt, modelBin);
//! [Read and initialize network]
//! [Check that network was read successfully]
if (net.empty())
{
std::cerr << "Can't load network by using the following files: " << std::endl;
std::cerr << "prototxt: " << modelTxt << std::endl;
std::cerr << "caffemodel: " << modelBin << std::endl;
std::cerr << "SqueezeNet v1.1 can be downloaded from:" << std::endl;
std::cerr << "https://github.com/DeepScale/SqueezeNet/tree/master/SqueezeNet_v1.1" << std::endl;
exit(-1);
}
//! [Check that network was read successfully]
//! [Prepare blob]
Mat img = imread(imageFile);
if (img.empty())
{
std::cerr << "Can't read image from the file: " << imageFile << std::endl;
exit(-1);
}
if (img.channels() != 3)
{
std::cerr << "Image " << imageFile << " isn't 3-channel" << std::endl;
exit(-1);
}
resize(img, img, Size(227, 227)); // SqueezeNet v1.1 expects a 3x227x227 input image.
Mat inputBlob = blobFromImage(img, 1.0, Size(), Scalar(), false); // Convert Mat to 4-dimensional batch.
//! [Prepare blob]
//! [Set input blob]
net.setInput(inputBlob); // Set the network input.
//! [Set input blob]
//! [Enable Halide backend]
net.setPreferableBackend(DNN_BACKEND_HALIDE); // Tell the engine to use Halide where possible.
//! [Enable Halide backend]
//! [Make forward pass]
Mat prob = net.forward("prob"); // Compute output.
//! [Make forward pass]
//! [Determine the best class]
int classId;
double classProb;
getMaxClass(prob, &classId, &classProb); // Find the best class.
//! [Determine the best class]
//! [Print results]
std::vector<std::string> classNames = readClassNames();
std::cout << "Best class: #" << classId << " '" << classNames.at(classId) << "'" << std::endl;
std::cout << "Probability: " << classProb * 100 << "%" << std::endl;
//! [Print results]
return 0;
} //main


@ -0,0 +1,154 @@
#include <opencv2/dnn.hpp>
#include <opencv2/dnn/shape_utils.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
using namespace cv;
using namespace cv::dnn;
#include <fstream>
#include <iostream>
#include <cstdlib>
using namespace std;
const size_t width = 300;
const size_t height = 300;
Mat getMean(const size_t& imageHeight, const size_t& imageWidth)
{
Mat mean;
const int meanValues[3] = {104, 117, 123};
vector<Mat> meanChannels;
for(size_t i = 0; i < 3; i++)
{
Mat channel(imageHeight, imageWidth, CV_32F, Scalar(meanValues[i]));
meanChannels.push_back(channel);
}
cv::merge(meanChannels, mean);
return mean;
}
Mat preprocess(const Mat& frame)
{
Mat preprocessed;
frame.convertTo(preprocessed, CV_32F);
resize(preprocessed, preprocessed, Size(width, height)); //SSD accepts 300x300 RGB-images
Mat mean = getMean(height, width);
cv::subtract(preprocessed, mean, preprocessed);
return preprocessed;
}
const char* about = "This sample uses the Single-Shot Detector "
"(https://arxiv.org/abs/1512.02325) "
"to detect objects in an image\n";
const char* params
= "{ help | false | print usage }"
"{ proto | | model configuration }"
"{ model | | model weights }"
"{ image | | image for detection }"
"{ min_confidence | 0.5 | min confidence }";
int main(int argc, char** argv)
{
cv::CommandLineParser parser(argc, argv, params);
if (parser.get<bool>("help"))
{
std::cout << about << std::endl;
parser.printMessage();
return 0;
}
cv::dnn::initModule(); //Required if OpenCV is built as static libs
String modelConfiguration = parser.get<string>("proto");
String modelBinary = parser.get<string>("model");
//! [Create the importer of Caffe model]
Ptr<dnn::Importer> importer;
// Import Caffe SSD model
try
{
importer = dnn::createCaffeImporter(modelConfiguration, modelBinary);
}
catch (const cv::Exception &err) //Importer can throw errors, we will catch them
{
cerr << err.msg << endl;
}
//! [Create the importer of Caffe model]
if (!importer)
{
cerr << "Can't load network by using the following files: " << endl;
cerr << "prototxt: " << modelConfiguration << endl;
cerr << "caffemodel: " << modelBinary << endl;
cerr << "Models can be downloaded here:" << endl;
cerr << "https://github.com/weiliu89/caffe/tree/ssd#models" << endl;
exit(-1);
}
//! [Initialize network]
dnn::Net net;
importer->populateNet(net);
importer.release(); //We don't need importer anymore
//! [Initialize network]
cv::Mat frame = cv::imread(parser.get<string>("image"), -1);
if (frame.channels() == 4)
cvtColor(frame, frame, COLOR_BGRA2BGR);
//! [Prepare blob]
Mat preprocessedFrame = preprocess(frame);
Mat inputBlob = blobFromImage(preprocessedFrame); //Convert Mat to batch of images
//! [Prepare blob]
//! [Set input blob]
net.setInput(inputBlob, "data"); //set the network input
//! [Set input blob]
//! [Make forward pass]
Mat detection = net.forward("detection_out"); //compute output
//! [Make forward pass]
Mat detectionMat(detection.size[2], detection.size[3], CV_32F, detection.ptr<float>());
float confidenceThreshold = parser.get<float>("min_confidence");
for(int i = 0; i < detectionMat.rows; i++)
{
float confidence = detectionMat.at<float>(i, 2);
if(confidence > confidenceThreshold)
{
size_t objectClass = detectionMat.at<float>(i, 1);
float xLeftBottom = detectionMat.at<float>(i, 3) * frame.cols;
float yLeftBottom = detectionMat.at<float>(i, 4) * frame.rows;
float xRightTop = detectionMat.at<float>(i, 5) * frame.cols;
float yRightTop = detectionMat.at<float>(i, 6) * frame.rows;
std::cout << "Class: " << objectClass << std::endl;
std::cout << "Confidence: " << confidence << std::endl;
std::cout << " " << xLeftBottom
<< " " << yLeftBottom
<< " " << xRightTop
<< " " << yRightTop << std::endl;
Rect object(xLeftBottom, yLeftBottom,
xRightTop - xLeftBottom,
yRightTop - yLeftBottom);
rectangle(frame, object, Scalar(0, 255, 0));
}
}
imshow("detections", frame);
waitKey();
return 0;
} // main
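The reshape into detectionMat relies on the layout of the SSD "detection_out" blob: a 1x1xNx7 array whose rows are [imageId, classId, confidence, xmin, ymin, xmax, ymax], with coordinates normalized to [0, 1]. A hedged numpy rendering of the same filtering and rescaling:

import numpy as np

def decode_detections(detection, frame_w, frame_h, conf_threshold=0.5):
    # detection: the 1x1xNx7 "detection_out" blob; drop low-confidence rows
    rows = detection.reshape(-1, 7)
    kept = rows[rows[:, 2] > conf_threshold]
    boxes = kept[:, 3:7] * [frame_w, frame_h, frame_w, frame_h]  # to pixels
    return kept[:, 1].astype(int), kept[:, 2], boxes  # classes, scores, boxes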

File diff suppressed because it is too large.


@ -0,0 +1,173 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
// Copyright (C) 2016, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
/*
Sample of using the OpenCV dnn module with the TensorFlow Inception model.
*/
#include <opencv2/dnn.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
using namespace cv;
using namespace cv::dnn;
#include <fstream>
#include <iostream>
#include <cstdlib>
using namespace std;
const String keys =
"{help h || Sample app for loading Inception TensorFlow model. "
"The model and class names list can be downloaded here: "
"https://storage.googleapis.com/download.tensorflow.org/models/inception5h.zip }"
"{model m |tensorflow_inception_graph.pb| path to TensorFlow .pb model file }"
"{image i || path to image file }"
"{i_blob | input | input blob name) }"
"{o_blob | softmax2 | output blob name) }"
"{c_names c | imagenet_comp_graph_label_strings.txt | path to file with classnames for class id }"
"{result r || path to save output blob (optional, binary format, NCHW order) }"
;
void getMaxClass(const Mat &probBlob, int *classId, double *classProb);
std::vector<String> readClassNames(const char *filename);
int main(int argc, char **argv)
{
cv::CommandLineParser parser(argc, argv, keys);
if (parser.has("help"))
{
parser.printMessage();
return 0;
}
String modelFile = parser.get<String>("model");
String imageFile = parser.get<String>("image");
String inBlobName = parser.get<String>("i_blob");
String outBlobName = parser.get<String>("o_blob");
if (!parser.check())
{
parser.printErrors();
return 0;
}
String classNamesFile = parser.get<String>("c_names");
String resultFile = parser.get<String>("result");
//! [Create the importer of TensorFlow model]
Ptr<dnn::Importer> importer;
try //Try to import the TensorFlow Inception model
{
importer = dnn::createTensorflowImporter(modelFile);
}
catch (const cv::Exception &err) //Importer can throw errors, we will catch them
{
std::cerr << err.msg << std::endl;
}
//! [Create the importer of TensorFlow model]
if (!importer)
{
std::cerr << "Can't load network by using the mode file: " << std::endl;
std::cerr << modelFile << std::endl;
exit(-1);
}
//! [Initialize network]
dnn::Net net;
importer->populateNet(net);
importer.release(); //We don't need importer anymore
//! [Initialize network]
//! [Prepare blob]
Mat img = imread(imageFile);
if (img.empty())
{
std::cerr << "Can't read image from the file: " << imageFile << std::endl;
exit(-1);
}
cv::Size inputImgSize = cv::Size(224, 224);
if (inputImgSize != img.size())
resize(img, img, inputImgSize); //Resize image to input size
Mat inputBlob = blobFromImage(img); //Convert Mat to image batch
//! [Prepare blob]
inputBlob -= 117.0; //subtract the mean value (117) from the blob
//! [Set input blob]
net.setInput(inputBlob, inBlobName); //set the network input
//! [Set input blob]
cv::TickMeter tm;
tm.start();
//! [Make forward pass]
Mat result = net.forward(outBlobName); //compute output
//! [Make forward pass]
tm.stop();
if (!resultFile.empty()) {
CV_Assert(result.isContinuous());
ofstream fout(resultFile.c_str(), ios::out | ios::binary);
fout.write((char*)result.data, result.total() * sizeof(float));
fout.close();
}
std::cout << "Output blob shape " << result.size[0] << " x " << result.size[1] << " x " << result.size[2] << " x " << result.size[3] << std::endl;
std::cout << "Inference time, ms: " << tm.getTimeMilli() << std::endl;
if (!classNamesFile.empty()) {
std::vector<String> classNames = readClassNames(classNamesFile.c_str());
int classId;
double classProb;
getMaxClass(result, &classId, &classProb);//find the best class
//! [Print results]
std::cout << "Best class: #" << classId << " '" << classNames.at(classId) << "'" << std::endl;
std::cout << "Probability: " << classProb * 100 << "%" << std::endl;
}
return 0;
} //main
/* Find best class for the blob (i.e. the class with maximal probability) */
void getMaxClass(const Mat &probBlob, int *classId, double *classProb)
{
Mat probMat = probBlob.reshape(1, 1); //reshape the blob to a 1xN row
Point classNumber;
minMaxLoc(probMat, NULL, classProb, NULL, &classNumber);
*classId = classNumber.x;
}
std::vector<String> readClassNames(const char *filename)
{
std::vector<String> classNames;
std::ifstream fp(filename);
if (!fp.is_open())
{
std::cerr << "File with classes labels not found: " << filename << std::endl;
exit(-1);
}
std::string name;
while (!fp.eof())
{
std::getline(fp, name);
if (name.length())
classNames.push_back( name );
}
fp.close();
return classNames;
}
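Because the optional result file holds raw float32 values in NCHW order, it can be inspected offline; a hypothetical check (the file name is whatever was passed via the "result" option, and any reshape must match the printed "Output blob shape"):

import numpy as np

probs = np.fromfile('result.bin', np.float32)  # raw float32 dump from above
print(probs.argmax(), probs.max())             # best class id, its score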


@ -0,0 +1,207 @@
/*
Sample of using the OpenCV dnn module with the Torch ENet model.
*/
#include <opencv2/dnn.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
using namespace cv;
using namespace cv::dnn;
#include <fstream>
#include <iostream>
#include <cstdlib>
#include <cfloat>
#include <sstream>
using namespace std;
const String keys =
"{help h || Sample app for loading ENet Torch model. "
"The model and class names list can be downloaded here: "
"https://www.dropbox.com/sh/dywzk3gyb12hpe5/AAD5YkUa8XgMpHs2gCRgmCVCa }"
"{model m || path to Torch .net model file (model_best.net) }"
"{image i || path to image file }"
"{c_names c || path to file with classnames for channels (optional, categories.txt) }"
"{result r || path to save output blob (optional, binary format, NCHW order) }"
"{show s || whether to show all output channels or not}"
"{o_blob || output blob's name. If empty, last blob's name in net is used}"
;
static void colorizeSegmentation(const Mat &score, Mat &segm,
Mat &legend, vector<String> &classNames, vector<Vec3b> &colors);
static vector<Vec3b> readColors(const String &filename, vector<String>& classNames);
int main(int argc, char **argv)
{
CommandLineParser parser(argc, argv, keys);
if (parser.has("help"))
{
parser.printMessage();
return 0;
}
String modelFile = parser.get<String>("model");
String imageFile = parser.get<String>("image");
if (!parser.check())
{
parser.printErrors();
return 0;
}
String classNamesFile = parser.get<String>("c_names");
String resultFile = parser.get<String>("result");
//! [Read model and initialize network]
dnn::Net net = dnn::readNetFromTorch(modelFile);
//! [Prepare blob]
Mat img = imread(imageFile), input;
if (img.empty())
{
std::cerr << "Can't read image from the file: " << imageFile << std::endl;
exit(-1);
}
Size origSize = img.size();
Size inputImgSize = cv::Size(1024, 512);
if (inputImgSize != origSize)
resize(img, img, inputImgSize); //Resize image to input size
Mat inputBlob = blobFromImage(img, 1./255); //Convert Mat to image batch
//! [Prepare blob]
//! [Set input blob]
net.setInput(inputBlob, ""); //set the network input
//! [Set input blob]
TickMeter tm;
String oBlob = net.getLayerNames().back();
if (!parser.get<String>("o_blob").empty())
{
oBlob = parser.get<String>("o_blob");
}
tm.start();
//! [Make forward pass]
Mat result = net.forward(oBlob);
//! [Make forward pass]
tm.stop();
if (!resultFile.empty()) {
CV_Assert(result.isContinuous());
ofstream fout(resultFile.c_str(), ios::out | ios::binary);
fout.write((char*)result.data, result.total() * sizeof(float));
fout.close();
}
std::cout << "Output blob: " << result.size[0] << " x " << result.size[1] << " x " << result.size[2] << " x " << result.size[3] << "\n";
std::cout << "Inference time, ms: " << tm.getTimeMilli() << std::endl;
if (parser.has("show"))
{
std::vector<String> classNames;
vector<cv::Vec3b> colors;
if(!classNamesFile.empty()) {
colors = readColors(classNamesFile, classNames);
}
Mat segm, legend;
colorizeSegmentation(result, segm, legend, classNames, colors);
Mat show;
addWeighted(img, 0.1, segm, 0.9, 0.0, show);
cv::resize(show, show, origSize, 0, 0, cv::INTER_NEAREST);
imshow("Result", show);
if(classNames.size())
imshow("Legend", legend);
waitKey();
}
return 0;
} //main
static void colorizeSegmentation(const Mat &score, Mat &segm, Mat &legend, vector<String> &classNames, vector<Vec3b> &colors)
{
const int rows = score.size[2];
const int cols = score.size[3];
const int chns = score.size[1];
cv::Mat maxCl(rows, cols, CV_8UC1);
cv::Mat maxVal(rows, cols, CV_32FC1, cv::Scalar(-FLT_MAX)); //init low so the first channel always wins
for (int ch = 0; ch < chns; ch++)
{
for (int row = 0; row < rows; row++)
{
const float *ptrScore = score.ptr<float>(0, ch, row);
uchar *ptrMaxCl = maxCl.ptr<uchar>(row);
float *ptrMaxVal = maxVal.ptr<float>(row);
for (int col = 0; col < cols; col++)
{
if (ptrScore[col] > ptrMaxVal[col])
{
ptrMaxVal[col] = ptrScore[col];
ptrMaxCl[col] = ch;
}
}
}
}
segm.create(rows, cols, CV_8UC3);
for (int row = 0; row < rows; row++)
{
const uchar *ptrMaxCl = maxCl.ptr<uchar>(row);
cv::Vec3b *ptrSegm = segm.ptr<cv::Vec3b>(row);
for (int col = 0; col < cols; col++)
{
ptrSegm[col] = colors[ptrMaxCl[col]];
}
}
if (classNames.size() == colors.size())
{
int blockHeight = 30;
legend.create(blockHeight*classNames.size(), 200, CV_8UC3);
for(int i = 0; i < classNames.size(); i++)
{
cv::Mat block = legend.rowRange(i*blockHeight, (i+1)*blockHeight);
block = colors[i];
putText(block, classNames[i], Point(0, blockHeight/2), FONT_HERSHEY_SIMPLEX, 0.5, Scalar());
}
}
}
static vector<Vec3b> readColors(const String &filename, vector<String>& classNames)
{
vector<cv::Vec3b> colors;
classNames.clear();
ifstream fp(filename.c_str());
if (!fp.is_open())
{
cerr << "File with colors not found: " << filename << endl;
exit(-1);
}
string line;
while (!fp.eof())
{
getline(fp, line);
if (line.length())
{
stringstream ss(line);
string name; ss >> name;
int temp;
cv::Vec3b color;
ss >> temp; color[0] = temp;
ss >> temp; color[1] = temp;
ss >> temp; color[2] = temp;
classNames.push_back(name);
colors.push_back(color);
}
}
fp.close();
return colors;
}

File diff suppressed because it is too large.


@ -0,0 +1,382 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "../precomp.hpp"
using namespace cv;
using namespace cv::dnn;
#if HAVE_PROTOBUF
#include "caffe.pb.h"
#include <iostream>
#include <fstream>
#include <sstream>
#include <algorithm>
#include <google/protobuf/message.h>
#include <google/protobuf/text_format.h>
#include <google/protobuf/io/zero_copy_stream_impl.h>
#include "caffe_io.hpp"
using ::google::protobuf::RepeatedField;
using ::google::protobuf::RepeatedPtrField;
using ::google::protobuf::Message;
using ::google::protobuf::Descriptor;
using ::google::protobuf::FieldDescriptor;
using ::google::protobuf::Reflection;
namespace
{
template<typename T>
static cv::String toString(const T &v)
{
std::ostringstream ss;
ss << v;
return ss.str();
}
class CaffeImporter : public Importer
{
caffe::NetParameter net;
caffe::NetParameter netBinary;
public:
CaffeImporter(const char *prototxt, const char *caffeModel)
{
ReadNetParamsFromTextFileOrDie(prototxt, &net);
if (caffeModel && caffeModel[0])
ReadNetParamsFromBinaryFileOrDie(caffeModel, &netBinary);
}
void addParam(const Message &msg, const FieldDescriptor *field, cv::dnn::LayerParams &params)
{
const Reflection *refl = msg.GetReflection();
int type = field->cpp_type();
bool isRepeated = field->is_repeated();
const std::string &name = field->name();
#define SET_UP_FIELD(getter, arrayConstr, gtype) \
if (isRepeated) { \
const RepeatedField<gtype> &v = refl->GetRepeatedField<gtype>(msg, field); \
params.set(name, DictValue::arrayConstr(v.begin(), (int)v.size())); \
} \
else { \
params.set(name, refl->getter(msg, field)); \
}
switch (type)
{
case FieldDescriptor::CPPTYPE_INT32:
SET_UP_FIELD(GetInt32, arrayInt, ::google::protobuf::int32);
break;
case FieldDescriptor::CPPTYPE_UINT32:
SET_UP_FIELD(GetUInt32, arrayInt, ::google::protobuf::uint32);
break;
case FieldDescriptor::CPPTYPE_INT64:
SET_UP_FIELD(GetInt64, arrayInt, ::google::protobuf::int64);
break;
case FieldDescriptor::CPPTYPE_UINT64:
SET_UP_FIELD(GetUInt64, arrayInt, ::google::protobuf::uint64);
break;
case FieldDescriptor::CPPTYPE_BOOL:
SET_UP_FIELD(GetBool, arrayInt, bool);
break;
case FieldDescriptor::CPPTYPE_DOUBLE:
SET_UP_FIELD(GetDouble, arrayReal, double);
break;
case FieldDescriptor::CPPTYPE_FLOAT:
SET_UP_FIELD(GetFloat, arrayReal, float);
break;
case FieldDescriptor::CPPTYPE_STRING:
if (isRepeated) {
const RepeatedPtrField<std::string> &v = refl->GetRepeatedPtrField<std::string>(msg, field);
params.set(name, DictValue::arrayString(v.begin(), (int)v.size()));
}
else {
params.set(name, refl->GetString(msg, field));
}
break;
case FieldDescriptor::CPPTYPE_ENUM:
if (isRepeated) {
int size = refl->FieldSize(msg, field);
std::vector<cv::String> buf(size);
for (int i = 0; i < size; i++)
buf[i] = refl->GetRepeatedEnum(msg, field, i)->name();
params.set(name, DictValue::arrayString(buf.begin(), size));
}
else {
params.set(name, refl->GetEnum(msg, field)->name());
}
break;
default:
CV_Error(Error::StsError, "Unknown type \"" + String(field->type_name()) + "\" in prototxt");
break;
}
}
inline static bool ends_with_param(const std::string &str)
{
static const std::string _param("_param");
return (str.size() >= _param.size()) && str.compare(str.size() - _param.size(), _param.size(), _param) == 0;
}
void extractLayerParams(const Message &msg, cv::dnn::LayerParams &params, bool isInternal = false)
{
const Descriptor *msgDesc = msg.GetDescriptor();
const Reflection *msgRefl = msg.GetReflection();
for (int fieldId = 0; fieldId < msgDesc->field_count(); fieldId++)
{
const FieldDescriptor *fd = msgDesc->field(fieldId);
if (!isInternal && !ends_with_param(fd->name()))
continue;
bool hasData = fd->is_required() ||
(fd->is_optional() && msgRefl->HasField(msg, fd)) ||
(fd->is_repeated() && msgRefl->FieldSize(msg, fd) > 0);
if (!hasData)
continue;
if (fd->cpp_type() == FieldDescriptor::CPPTYPE_MESSAGE)
{
if (fd->is_repeated()) //Extract only first item!
extractLayerParams(msgRefl->GetRepeatedMessage(msg, fd, 0), params, true);
else
extractLayerParams(msgRefl->GetMessage(msg, fd), params, true);
}
else
{
addParam(msg, fd, params);
}
}
}
void blobShapeFromProto(const caffe::BlobProto &pbBlob, MatShape& shape)
{
shape.clear();
if (pbBlob.has_num() || pbBlob.has_channels() || pbBlob.has_height() || pbBlob.has_width())
{
shape.push_back(pbBlob.num());
shape.push_back(pbBlob.channels());
shape.push_back(pbBlob.height());
shape.push_back(pbBlob.width());
}
else if (pbBlob.has_shape())
{
const caffe::BlobShape &_shape = pbBlob.shape();
for (int i = 0; i < _shape.dim_size(); i++)
shape.push_back((int)_shape.dim(i));
}
else
CV_Error(Error::StsError, "Unknown shape of input blob");
}
void blobFromProto(const caffe::BlobProto &pbBlob, cv::Mat &dstBlob)
{
MatShape shape;
blobShapeFromProto(pbBlob, shape);
dstBlob.create((int)shape.size(), &shape[0], CV_32F);
CV_Assert(pbBlob.data_size() == (int)dstBlob.total());
CV_DbgAssert(pbBlob.GetDescriptor()->FindFieldByLowercaseName("data")->cpp_type() == FieldDescriptor::CPPTYPE_FLOAT);
float *dstData = dstBlob.ptr<float>();
for (int i = 0; i < pbBlob.data_size(); i++)
dstData[i] = pbBlob.data(i);
}
void extractBinaryLayerParams(const caffe::LayerParameter& layer, LayerParams& layerParams)
{
const std::string &name = layer.name();
int li;
for (li = 0; li != netBinary.layer_size(); li++)
{
if (netBinary.layer(li).name() == name)
break;
}
if (li == netBinary.layer_size() || netBinary.layer(li).blobs_size() == 0)
return;
const caffe::LayerParameter &binLayer = netBinary.layer(li);
layerParams.blobs.resize(binLayer.blobs_size());
for (int bi = 0; bi < binLayer.blobs_size(); bi++)
{
blobFromProto(binLayer.blobs(bi), layerParams.blobs[bi]);
}
}
struct BlobNote
{
BlobNote(const std::string &_name, int _layerId, int _outNum) :
name(_name.c_str()), layerId(_layerId), outNum(_outNum) {}
const char *name;
int layerId, outNum;
};
std::vector<BlobNote> addedBlobs;
std::map<String, int> layerCounter;
void populateNet(Net dstNet)
{
int layersSize = net.layer_size();
layerCounter.clear();
addedBlobs.clear();
addedBlobs.reserve(layersSize + 1);
//setup input layer names
{
std::vector<String> netInputs(net.input_size());
for (int inNum = 0; inNum < net.input_size(); inNum++)
{
addedBlobs.push_back(BlobNote(net.input(inNum), 0, inNum));
netInputs[inNum] = net.input(inNum);
}
dstNet.setInputsNames(netInputs);
}
for (int li = 0; li < layersSize; li++)
{
const caffe::LayerParameter &layer = net.layer(li);
String name = layer.name();
String type = layer.type();
LayerParams layerParams;
extractLayerParams(layer, layerParams);
extractBinaryLayerParams(layer, layerParams);
int repetitions = layerCounter[name]++;
if (repetitions)
name += String("_") + toString(repetitions);
int id = dstNet.addLayer(name, type, layerParams);
for (int inNum = 0; inNum < layer.bottom_size(); inNum++)
addInput(layer.bottom(inNum), id, inNum, dstNet);
for (int outNum = 0; outNum < layer.top_size(); outNum++)
addOutput(layer, id, outNum);
}
addedBlobs.clear();
}
void addOutput(const caffe::LayerParameter &layer, int layerId, int outNum)
{
const std::string &name = layer.top(outNum);
bool haveDups = false;
for (int idx = (int)addedBlobs.size() - 1; idx >= 0; idx--)
{
if (addedBlobs[idx].name == name)
{
haveDups = true;
break;
}
}
if (haveDups)
{
bool isInplace = layer.bottom_size() > outNum && layer.bottom(outNum) == name;
if (!isInplace)
CV_Error(Error::StsBadArg, "Duplicate blobs produced by multiple sources");
}
addedBlobs.push_back(BlobNote(name, layerId, outNum));
}
void addInput(const std::string &name, int layerId, int inNum, Net &dstNet)
{
int idx;
for (idx = (int)addedBlobs.size() - 1; idx >= 0; idx--)
{
if (addedBlobs[idx].name == name)
break;
}
if (idx < 0)
{
CV_Error(Error::StsObjectNotFound, "Can't find output blob \"" + name + "\"");
return;
}
dstNet.connect(addedBlobs[idx].layerId, addedBlobs[idx].outNum, layerId, inNum);
}
~CaffeImporter()
{
}
};
}
Ptr<Importer> cv::dnn::createCaffeImporter(const String &prototxt, const String &caffeModel)
{
return Ptr<Importer>(new CaffeImporter(prototxt.c_str(), caffeModel.c_str()));
}
#else //HAVE_PROTOBUF
Ptr<Importer> cv::dnn::createCaffeImporter(const String&, const String&)
{
CV_Error(cv::Error::StsNotImplemented, "libprotobuf required to import data from Caffe models");
return Ptr<Importer>();
}
#endif //HAVE_PROTOBUF
Net cv::dnn::readNetFromCaffe(const String &prototxt, const String &caffeModel /*= String()*/)
{
Ptr<Importer> caffeImporter = createCaffeImporter(prototxt, caffeModel);
Net net;
if (caffeImporter)
caffeImporter->populateNet(net);
return net;
}

File diff suppressed because it is too large.


@ -0,0 +1,108 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
/*M///////////////////////////////////////////////////////////////////////////////////////
//COPYRIGHT
//
//All contributions by the University of California:
//Copyright (c) 2014, The Regents of the University of California (Regents)
//All rights reserved.
//
//All other contributions:
//Copyright (c) 2014, the respective contributors
//All rights reserved.
//
//Caffe uses a shared copyright model: each contributor holds copyright over
//their contributions to Caffe. The project versioning records all such
//contribution and copyright details. If a contributor wants to further mark
//their specific copyright on a particular contribution, they should indicate
//their copyright solely in the commit message of the change when it is
//committed.
//
//LICENSE
//
//Redistribution and use in source and binary forms, with or without
//modification, are permitted provided that the following conditions are met:
//
//1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
//THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
//ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
//WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
//DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
//ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
//(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
//LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
//ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
//(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
//SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
//CONTRIBUTION AGREEMENT
//
//By contributing to the BVLC/caffe repository through pull-request, comment,
//or otherwise, the contributor releases their content to the
//license and copyright terms herein.
//
//M*/
#ifndef __OPENCV_DNN_CAFFE_IO_HPP__
#define __OPENCV_DNN_CAFFE_IO_HPP__
#if HAVE_PROTOBUF
#include "caffe.pb.h"
namespace cv {
namespace dnn {
// Read parameters from a file into a NetParameter proto message.
void ReadNetParamsFromTextFileOrDie(const char* param_file,
caffe::NetParameter* param);
void ReadNetParamsFromBinaryFileOrDie(const char* param_file,
caffe::NetParameter* param);
}
}
#endif
#endif


@ -0,0 +1,104 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_DNN_CAFFE_GLOG_EMULATOR_HPP__
#define __OPENCV_DNN_CAFFE_GLOG_EMULATOR_HPP__
#include <cstdlib>
#include <cstring>
#include <iostream>
#include <sstream>
#include <opencv2/core.hpp>
#define CHECK(cond) for(cv::dnn::GLogWrapper _logger(__FILE__, CV_Func, __LINE__, "CHECK", #cond, cond); _logger.exit(); _logger.check()) _logger.stream()
#define CHECK_EQ(a, b) for(cv::dnn::GLogWrapper _logger(__FILE__, CV_Func, __LINE__, "CHECK", #a"="#b, ((a) == (b))); _logger.exit(); _logger.check()) _logger.stream()
#define LOG(TYPE) for(cv::dnn::GLogWrapper _logger(__FILE__, CV_Func, __LINE__, #TYPE); _logger.exit(); _logger.check()) _logger.stream()
namespace cv
{
namespace dnn
{
class GLogWrapper
{
const char *file, *func, *type, *cond_str;
int line;
bool cond_status, exit_loop;
std::stringstream sstream;
public:
GLogWrapper(const char *_file, const char *_func, int _line,
const char *_type,
const char *_cond_str = NULL, bool _cond_status = true
) :
file(_file), func(_func), type(_type), cond_str(_cond_str),
line(_line), cond_status(_cond_status), exit_loop(true) {}
std::iostream &stream()
{
return sstream;
}
bool exit()
{
return exit_loop;
}
void check()
{
exit_loop = false;
if (cond_str && !cond_status)
{
cv::error(cv::Error::StsError, "FAILED: " + String(cond_str) + ". " + sstream.str(), func, file, line);
}
else if (!cond_str && std::strcmp(type, "CHECK"))
{
if (!std::strcmp(type, "INFO"))
std::cout << sstream.str() << std::endl;
else
std::cerr << sstream.str() << std::endl;
}
}
};
}
}
#endif

2001 modules/dnn/src/dnn.cpp Normal file

File diff suppressed because it is too large.


@ -0,0 +1,284 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2017, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
#include "halide_scheduler.hpp"
#include "op_halide.hpp"
namespace cv
{
namespace dnn
{
#ifdef HAVE_HALIDE
static void applySplit(const FileNode& directive, Halide::Func& func,
const FileNode& params)
{
for (const auto& varNode : directive)
{
const std::string varName = varNode.name();
const std::string factorName = (std::string)varNode;
Halide::Var var(varName);
Halide::Var outerVar(varName + "o");
Halide::Var innerVar(varName + "i");
// If split factor is integer or parameters map has parameter value.
CV_Assert((varNode.isString() && !params[factorName].empty()) ||
varNode.isInt());
int factor = (int)(varNode.isInt() ? varNode : params[factorName]);
func.split(var, outerVar, innerVar, factor);
}
}
static void applyReorder(const FileNode& directive, Halide::Func& func)
{
std::string varName;
const int numVars = directive.size();
std::vector<Halide::VarOrRVar> reorderedVars;
reorderedVars.reserve(numVars);
for (int i = 0; i < numVars; ++i)
{
directive[i] >> varName;
reorderedVars.push_back(Halide::Var(varName));
}
func.reorder(reorderedVars);
}
static void applyFuse(const FileNode& directive, Halide::Func& func)
{
CV_Assert(directive["src"].size() >= 2);
CV_Assert(directive["dst"].size() == 1);
std::string str;
directive["src"][0] >> str;
Halide::Var firstVar(str);
directive["src"][1] >> str;
Halide::Var secondVar(str);
directive["dst"] >> str;
Halide::Var dstVar(str);
func.fuse(firstVar, secondVar, dstVar);
for (int i = 2, n = directive["src"].size(); i < n; ++i)
{
directive["src"][i] >> str;
func.fuse(Halide::Var(str), dstVar, dstVar);
}
}
static void applyParallel(const FileNode& directive, Halide::Func& func)
{
std::string varName;
for (int i = 0, n = directive.size(); i < n; ++i)
{
directive[i] >> varName;
func.parallel(Halide::Var(varName));
}
}
static void applyUnroll(const FileNode& directive, Halide::Func& func)
{
std::string varName;
for (int i = 0, n = directive.size(); i < n; ++i)
{
directive[i] >> varName;
func.unroll(Halide::Var(varName));
}
}
static void applyVectorize(const FileNode& directive, Halide::Func& func,
const FileNode& params)
{
for (const auto& varNode : directive)
{
const std::string varName = varNode.name();
const std::string factorName = (std::string)varNode;
// If split factor is integer or parameters map has parameter value.
CV_Assert((varNode.isString() && !params[factorName].empty()) ||
varNode.isInt());
int factor = (int)(varNode.isInt() ? varNode : params[factorName]);
Halide::Var var(varName);
Halide::Var inner(varName + "v");
func.split(var, var, inner, factor);
func.vectorize(inner);
}
}
static void applyStoreAt(const FileNode& directive, Halide::Func& func,
std::map<std::string, Halide::Func>& funcsMap)
{
for (const auto& funcNode : directive)
{
const std::string targetFuncName = funcNode.name();
if (funcsMap.find(targetFuncName) == funcsMap.end())
CV_Error(cv::Error::StsParseError, "Function " + targetFuncName +
" is not represented in Halide pipeline");
Halide::Func targetFunc = funcsMap[targetFuncName];
func.store_at(targetFunc, (std::string)funcNode);
break;
}
}
static void applyComputeAt(const FileNode& directive, Halide::Func& func,
std::map<std::string, Halide::Func>& funcsMap)
{
for (const auto& funcNode : directive)
{
const std::string targetFuncName = funcNode.name();
if (funcsMap.find(targetFuncName) == funcsMap.end())
CV_Error(cv::Error::StsParseError, "Function " + targetFuncName +
" is not represented in Halide pipeline");
Halide::Func targetFunc = funcsMap[targetFuncName];
func.compute_at(targetFunc, (std::string)funcNode);
break;
}
}
static void applyComputeRoot(const FileNode& directive, Halide::Func& func)
{
bool compute_root;
directive >> compute_root;
if (compute_root)
func.compute_root();
}
static void applyGpuBlocks(const FileNode& directive, Halide::Func& func)
{
std::string varName;
for (int i = 0, n = directive.size(); i < n; ++i)
{
directive[i] >> varName;
func.gpu_blocks(Halide::Var(varName));
}
}
static void applyGpuThreads(const FileNode& directive, Halide::Func& func)
{
std::string varName;
for (int i = 0, n = directive.size(); i < n; ++i)
{
directive[i] >> varName;
func.gpu_threads(Halide::Var(varName));
}
}
static void apply(const FileNode& directives, Halide::Func& func,
std::map<std::string, Halide::Func>& funcsMap,
const FileNode& params)
{
for (const auto& directive : directives)
{
if (directive.name() == "split")
applySplit(directive, func, params);
else if (directive.name() == "reorder")
applyReorder(directive, func);
else if (directive.name() == "fuse")
applyFuse(directive, func);
else if (directive.name() == "parallel")
applyParallel(directive, func);
else if (directive.name() == "unroll")
applyUnroll(directive, func);
else if (directive.name() == "vectorize")
applyVectorize(directive, func, params);
else if (directive.name() == "store_at")
applyStoreAt(directive, func, funcsMap);
else if (directive.name() == "compute_at")
applyComputeAt(directive, func, funcsMap);
else if (directive.name() == "compute_root")
applyComputeRoot(directive, func);
else if (directive.name() == "gpu_blocks")
applyGpuBlocks(directive, func);
else if (directive.name() == "gpu_threads")
applyGpuThreads(directive, func);
else
CV_Error(Error::StsNotImplemented, "Scheduling directive " +
directive.name() + " is not implemented.");
}
}
// Remove any numeric symbols after '$' sign.
static std::string Deunique(std::string str)
{
std::string::size_type pos;
while ((pos = str.find('$')) != std::string::npos)
{
std::string::size_type len = str.find_first_not_of("0123456789", pos + 1) - pos;
str.replace(pos, len, "");
}
return str;
}
#endif // HAVE_HALIDE
HalideScheduler::HalideScheduler(const std::string& configFile)
{
if (!configFile.empty())
fs = FileStorage(configFile, FileStorage::READ);
}
HalideScheduler::~HalideScheduler()
{
if (fs.isOpened())
fs.release();
}
bool HalideScheduler::process(Ptr<BackendNode>& node)
{
#ifdef HAVE_HALIDE
if (!fs.isOpened())
return false;
const FileNode& scheduleNode = fs["scheduling"];
if (scheduleNode.empty())
CV_Error(cv::Error::StsParseError, "Scheduling file should has scheduling node");
std::string str;
std::map<std::string, Halide::Func> funcsMap; // Scheduled functions.
// For every function, from top to bottom, we try to find a scheduling node.
// Scheduling is successful (returns true) if a node is present for the
// first (top) function.
CV_Assert(!node.empty());
std::vector<Halide::Func>& funcs = node.dynamicCast<HalideBackendNode>()->funcs;
for (int i = funcs.size() - 1; i >= 0; --i)
{
Halide::Func& func = funcs[i];
// For functions with the same name Halide generates unique names
// for example func, func$1, func$2.
// They are always formed with '$' and number.
std::string funcName = Deunique(func.name());
const FileNode& funcNode = scheduleNode[funcName];
if (!funcNode.empty())
{
if (!funcNode["pattern"].empty())
{
funcNode["pattern"] >> str;
if (fs["patterns"][str].empty())
CV_Error(cv::Error::StsParseError, "Scheduling pattern " + str +
" is not defined");
apply(fs["patterns"][str], func, funcsMap, funcNode["params"]);
}
else
{
apply(funcNode, func, funcsMap, funcNode["params"]);
}
}
else
{
if (funcsMap.empty())
return false;
}
funcsMap[funcName] = func;
}
return true;
#endif // HAVE_HALIDE
return false;
}
} // namespace dnn
} // namespace cv


@ -0,0 +1,37 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2017, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
#ifndef __OPENCV_DNN_HALIDE_SCHEDULER_HPP__
#define __OPENCV_DNN_HALIDE_SCHEDULER_HPP__
#include <opencv2/dnn.hpp>
namespace cv
{
namespace dnn
{
class HalideScheduler
{
public:
HalideScheduler(const std::string& configFile);
~HalideScheduler();
// Returns true if the pipeline is found in the scheduling file.
// If there is more than one function, returns true if the top function
// is scheduled. Scheduling of the remaining functions is optional.
bool process(Ptr<BackendNode>& node);
private:
FileStorage fs;
};
} // namespace dnn
} // namespace cv
#endif // __OPENCV_DNN_HALIDE_SCHEDULER_HPP__

modules/dnn/src/init.cpp Normal file
View File

@ -0,0 +1,107 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
namespace cv
{
namespace dnn
{
struct AutoInitializer
{
bool status;
AutoInitializer() : status(false)
{
initModule();
}
};
static AutoInitializer init;
void initModule()
{
if (init.status)
return;
REG_RUNTIME_LAYER_CLASS(Slice, SliceLayer);
REG_RUNTIME_LAYER_CLASS(Split, SplitLayer);
REG_RUNTIME_LAYER_CLASS(Concat, ConcatLayer);
REG_RUNTIME_LAYER_CLASS(Reshape, ReshapeLayer);
REG_RUNTIME_LAYER_CLASS(Flatten, FlattenLayer);
REG_RUNTIME_LAYER_CLASS(Convolution, ConvolutionLayer);
REG_RUNTIME_LAYER_CLASS(Deconvolution, DeconvolutionLayer);
REG_RUNTIME_LAYER_CLASS(Pooling, PoolingLayer);
REG_RUNTIME_LAYER_CLASS(LRN, LRNLayer);
REG_RUNTIME_LAYER_CLASS(InnerProduct, InnerProductLayer);
REG_RUNTIME_LAYER_CLASS(Softmax, SoftmaxLayer);
REG_RUNTIME_LAYER_CLASS(MVN, MVNLayer);
REG_RUNTIME_LAYER_CLASS(ReLU, ReLULayer);
REG_RUNTIME_LAYER_CLASS(ChannelsPReLU, ChannelsPReLULayer);
REG_RUNTIME_LAYER_CLASS(Sigmoid, SigmoidLayer);
REG_RUNTIME_LAYER_CLASS(TanH, TanHLayer);
REG_RUNTIME_LAYER_CLASS(BNLL, BNLLLayer);
REG_RUNTIME_LAYER_CLASS(AbsVal, AbsLayer);
REG_RUNTIME_LAYER_CLASS(Power, PowerLayer);
REG_RUNTIME_LAYER_CLASS(BatchNorm, BatchNormLayer);
REG_RUNTIME_LAYER_CLASS(MaxUnpool, MaxUnpoolLayer);
REG_RUNTIME_LAYER_CLASS(Dropout, BlankLayer);
REG_RUNTIME_LAYER_CLASS(Identity, BlankLayer);
REG_RUNTIME_LAYER_CLASS(Crop, CropLayer);
REG_RUNTIME_LAYER_CLASS(Eltwise, EltwiseLayer);
REG_RUNTIME_LAYER_CLASS(Permute, PermuteLayer);
REG_RUNTIME_LAYER_CLASS(PriorBox, PriorBoxLayer);
REG_RUNTIME_LAYER_CLASS(DetectionOutput, DetectionOutputLayer);
REG_RUNTIME_LAYER_CLASS(NormalizeBBox, NormalizeBBoxLayer);
REG_RUNTIME_LAYER_CLASS(Normalize, NormalizeBBoxLayer);
REG_RUNTIME_LAYER_CLASS(Shift, ShiftLayer);
REG_RUNTIME_LAYER_CLASS(Padding, PaddingLayer);
REG_RUNTIME_LAYER_CLASS(Scale, ScaleLayer);
init.status = true;
}
}
}
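The registrations above make every listed layer constructible from its string type name through the dnn layer factory. A minimal, hypothetical usage sketch (the parameter value is an assumption; LayerFactory::createLayerInstance is the public factory entry point):

LayerParams params;
params.set("negative_slope", 0.1f); // leaky-ReLU slope, illustrative value
// "ReLU" resolves to ReLULayer through the table built in initModule().
Ptr<Layer> layer = LayerFactory::createLayerInstance("ReLU", params);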

View File

@ -0,0 +1,198 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
// Copyright (C) 2016, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
/*
Implementation of Batch Normalization layer.
*/
#include "../precomp.hpp"
#include "op_halide.hpp"
#include <opencv2/dnn/shape_utils.hpp>
namespace cv
{
namespace dnn
{
class BatchNormLayerImpl : public BatchNormLayer
{
public:
Mat weights_, bias_;
BatchNormLayerImpl(const LayerParams& params)
{
setParamsFrom(params);
CV_Assert(blobs.size() >= 3);
hasWeights = params.get<bool>("has_weight", false);
hasBias = params.get<bool>("has_bias", false);
epsilon = params.get<float>("eps", 1E-5);
size_t n = blobs[0].total();
CV_Assert(blobs[1].total() == n &&
blobs[0].isContinuous() && blobs[1].isContinuous() &&
blobs[0].type() == CV_32F && blobs[1].type() == CV_32F);
float varMeanScale = 1.f;
if (!hasWeights && !hasBias) {
CV_Assert(blobs[2].type() == CV_32F);
varMeanScale = blobs[2].at<float>(0);
if (varMeanScale != 0)
varMeanScale = 1/varMeanScale;
}
const int weightsBlobIndex = 2;
const int biasBlobIndex = weightsBlobIndex + hasWeights;
if( hasWeights )
{
CV_Assert((size_t)weightsBlobIndex < blobs.size());
const Mat& w = blobs[weightsBlobIndex];
CV_Assert(w.isContinuous() && w.type() == CV_32F && w.total() == (size_t)n);
}
if( hasBias )
{
CV_Assert((size_t)biasBlobIndex < blobs.size());
const Mat& b = blobs[weightsBlobIndex];
CV_Assert(b.isContinuous() && b.type() == CV_32F && b.total() == (size_t)n);
}
const float* meanData = blobs[0].ptr<float>();
const float* stdData = blobs[1].ptr<float>();
const float* weightsData = hasWeights ? blobs[weightsBlobIndex].ptr<float>() : 0;
const float* biasData = hasBias ? blobs[biasBlobIndex].ptr<float>() : 0;
weights_.create(1, (int)n, CV_32F);
bias_.create(1, (int)n, CV_32F);
float* dstWeightsData = weights_.ptr<float>();
float* dstBiasData = bias_.ptr<float>();
for (size_t i = 0; i < n; ++i)
{
float w = (hasWeights ? weightsData[i] : 1.0f) / sqrt(stdData[i] * varMeanScale + epsilon);
dstWeightsData[i] = w;
dstBiasData[i] = (hasBias ? biasData[i] : 0.0f) - w * meanData[i] * varMeanScale;
}
}
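// Illustrative summary of the folding above: with mean m = meanData[i],
// variance v = stdData[i], optional scale g = weightsData[i] and shift
// b = biasData[i], and s = varMeanScale,
// dstWeightsData[i] = g / sqrt(v * s + epsilon)
// dstBiasData[i] = b - dstWeightsData[i] * m * s
// so forward() reduces to a per-channel affine map y = w * x + b'.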
void getScaleShift(Mat& scale, Mat& shift) const
{
scale = weights_;
shift = bias_;
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
std::vector<MatShape> &internals) const
{
Layer::getMemoryShapes(inputs, requiredOutputs, outputs, internals);
return true;
}
virtual bool supportBackend(int backendId)
{
return backendId == DNN_BACKEND_DEFAULT ||
(backendId == DNN_BACKEND_HALIDE && haveHalide());
}
void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
{
CV_Assert(blobs.size() >= 2);
CV_Assert(inputs.size() == 1);
Mat &inpBlob = *inputs[0];
int rows = inpBlob.size[2];
int cols = inpBlob.size[3];
for (size_t ii = 0; ii < outputs.size(); ii++)
{
Mat &outBlob = outputs[ii];
for(int num = 0; num < outBlob.size[0]; num++)
{
for (int n = 0; n < outBlob.size[1]; n++)
{
float w = weights_.at<float>(n);
float b = bias_.at<float>(n);
Mat inpBlobPlane(rows, cols, CV_32F, inpBlob.ptr<float>(num, n));
Mat outBlobPlane(rows, cols, CV_32F, outBlob.ptr<float>(num, n));
inpBlobPlane.convertTo(outBlobPlane, CV_32F, w, b);
}
}
}
}
virtual Ptr<BackendNode> tryAttach(const Ptr<BackendNode>& node)
{
switch (node->backendId)
{
case DNN_BACKEND_HALIDE:
{
#ifdef HAVE_HALIDE
auto base = node.dynamicCast<HalideBackendNode>();
Halide::Func& input = base->funcs.back();
Halide::Var x("x"), y("y"), c("c"), n("n");
Halide::Func top = attachHalide(input(x, y, c, n));
return Ptr<BackendNode>(new HalideBackendNode(base, top));
#endif // HAVE_HALIDE
break;
}
}
return Ptr<BackendNode>();
}
virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &inputs)
{
#ifdef HAVE_HALIDE
Halide::Buffer<float> input = halideBuffer(inputs[0]);
Halide::Var x("x"), y("y"), c("c"), n("n");
Halide::Func top = attachHalide(input(x, y, c, n));
return Ptr<BackendNode>(new HalideBackendNode(top));
#endif // HAVE_HALIDE
return Ptr<BackendNode>();
}
#ifdef HAVE_HALIDE
// attachHalide works with both Halide::Buffer and Halide::Func inputs.
// In the latter case the layers are fused.
Halide::Func attachHalide(const Halide::Expr& input)
{
Halide::Func top = (name.empty() ? Halide::Func() : Halide::Func(name));
Halide::Var x("x"), y("y"), c("c"), n("n");
const int numChannels = weights_.total();
auto weights = wrapToHalideBuffer(weights_, {numChannels});
auto bias = wrapToHalideBuffer(bias_, {numChannels});
top(x, y, c, n) = input * weights(c) + bias(c);
return top;
}
#endif // HAVE_HALIDE
virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
const std::vector<MatShape> &outputs) const
{
(void)outputs; // suppress unused variable warning
int64 flops = 0;
for(int i = 0; i < inputs.size(); i++)
{
flops += 3*total(inputs[i]);
}
return flops;
}
};
Ptr<BatchNormLayer> BatchNormLayer::create(const LayerParams& params)
{
return Ptr<BatchNormLayer>(new BatchNormLayerImpl(params));
}
} // namespace dnn
} // namespace cv

View File

@ -0,0 +1,78 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "../precomp.hpp"
namespace cv
{
namespace dnn
{
class BlankLayerImpl : public BlankLayer
{
public:
BlankLayerImpl(const LayerParams& params)
{
setParamsFrom(params);
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
std::vector<MatShape> &internals) const
{
Layer::getMemoryShapes(inputs, requiredOutputs, outputs, internals);
return true;
}
void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
{
for (int i = 0, n = outputs.size(); i < n; ++i)
if (outputs[i].data != inputs[i]->data)
inputs[i]->copyTo(outputs[i]);
}
};
Ptr<BlankLayer> BlankLayer::create(const LayerParams& params)
{
return Ptr<BlankLayer>(new BlankLayerImpl(params));
}
}
}

View File

@ -0,0 +1,141 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "../precomp.hpp"
#include "layers_common.hpp"
#include "op_halide.hpp"
namespace cv
{
namespace dnn
{
class ConcatLayerImpl : public ConcatLayer
{
public:
ConcatLayerImpl(const LayerParams& params)
{
setParamsFrom(params);
axis = params.get<int>("axis", 1);
}
virtual bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
std::vector<MatShape> &internals) const
{
CV_Assert(inputs.size() > 0);
outputs.clear();
outputs.push_back(inputs[0]);
int cAxis = clamp(axis, inputs[0]);
int axisSum = 0;
for (size_t i = 0; i < inputs.size(); i++)
{
MatShape curShape = inputs[i];
CV_Assert(curShape.size() == outputs.back().size());
for (int curAxis = 0; curAxis < outputs.back().size(); curAxis++)
{
if (curAxis != cAxis && outputs.back()[curAxis] != curShape[curAxis])
CV_Error(Error::StsBadSize, "Inconsistent shape for ConcatLayer");
}
axisSum += curShape[cAxis];
}
outputs.back()[cAxis] = axisSum;
return false;
}
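// Illustrative example (assumed shapes): concatenating inputs of shapes
// [1, 3, H, W] and [1, 5, H, W] along axis 1 yields an output of shape
// [1, 8, H, W], as computed by getMemoryShapes above.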
virtual bool supportBackend(int backendId)
{
return backendId == DNN_BACKEND_DEFAULT ||
(backendId == DNN_BACKEND_HALIDE && haveHalide() && axis == 1); // By channels
}
void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
{
int cAxis = clamp(axis, inputs[0]->dims);
Mat& outMat = outputs[0];
std::vector<Range> ranges(outputs[0].dims, Range::all());
ranges[cAxis].start = 0;
for (size_t i = 0; i < inputs.size(); i++)
{
ranges[cAxis].end = ranges[cAxis].start + inputs[i]->size[cAxis];
inputs[i]->copyTo(outMat(&ranges[0]));
ranges[cAxis].start = ranges[cAxis].end;
}
}
virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &input)
{
#ifdef HAVE_HALIDE
std::vector<Halide::Buffer<> > inputBuffers = halideBuffers(input);
Halide::Var x("x"), y("y"), c("c"), n("n");
Halide::Func top = (name.empty() ? Halide::Func() : Halide::Func(name));
int offset = inputBuffers[0].channels();
Halide::Expr topExpr = select(c < offset,
inputBuffers[0](x, y, c, n),
inputBuffers[1](x, y, c - offset, n));
for (int i = 2; i < input.size(); ++i)
{
offset += inputBuffers[i - 1].channels();
topExpr = select(c < offset, topExpr,
inputBuffers[i](x, y, c - offset, n));
}
top(x, y, c, n) = topExpr;
return Ptr<BackendNode>(new HalideBackendNode(top));
#endif // HAVE_HALIDE
return Ptr<BackendNode>();
}
};
Ptr<ConcatLayer> ConcatLayer::create(const LayerParams& params)
{
return Ptr<ConcatLayer>(new ConcatLayerImpl(params));
}
}
}

File diff suppressed because it is too large

View File

@ -0,0 +1,153 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "../precomp.hpp"
#include "layers_common.hpp"
namespace cv
{
namespace dnn
{
class CropLayerImpl : public CropLayer
{
public:
CropLayerImpl(const LayerParams& params)
{
setParamsFrom(params);
startAxis = params.get<int>("axis", 2);
const DictValue *paramOffset = params.ptr("offset");
if (paramOffset)
{
for (int i = 0; i < paramOffset->size(); i++)
offset.push_back(paramOffset->get<int>(i));
}
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
std::vector<MatShape> &internals) const
{
CV_Assert(inputs.size() == 2);
MatShape dstShape = inputs[0];
int start = clamp(startAxis, dstShape);
for (int i = start; i < dstShape.size(); i++)
{
dstShape[i] = inputs[1][i];
}
outputs.resize(1, dstShape);
return false;
}
void finalize(const std::vector<Mat *> &inputs, std::vector<Mat> &outputs)
{
CV_Assert(2 == inputs.size());
const Mat &inpBlob = *inputs[0];
const Mat &inpSzBlob = *inputs[1];
int dims = inpBlob.dims;
int start_axis = clamp(startAxis, dims);
std::vector<int> offset_final(dims, 0);
if (offset.size() == 1)
{
for (int i = start_axis; i < dims; i++)
offset_final[i] = offset[0];
}
else if (offset.size() > 1)
{
if ((int)offset.size() != dims - start_axis)
CV_Error(Error::StsBadArg, "number of offset values specified must be "
"equal to the number of dimensions following axis.");
for (int i = start_axis; i < dims; i++)
offset_final[i] = offset[i - start_axis];
}
crop_ranges.resize(dims, Range::all());
for (int i = 0; i < dims; i++)
{
if( i < start_axis )
continue;
if (!offset.empty()) //normal case
{
if (offset_final[i] < 0 || offset_final[i] + inpSzBlob.size[i] > inpBlob.size[i])
CV_Error(Error::StsBadArg, "invalid crop parameters");
crop_ranges[i] = Range(offset_final[i], offset_final[i] + inpSzBlob.size[i]);
}
else //detect offset automatically so that cropped image is center of original one
{
if (inpSzBlob.size[i] > inpBlob.size[i])
CV_Error(Error::StsBadArg, "invalid output blob size");
int cur_crop = (inpBlob.size[i] - inpSzBlob.size[i]) / 2;
crop_ranges[i] = Range(cur_crop, cur_crop + inpSzBlob.size[i]);
}
}
}
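// Illustrative example (assumed sizes): with inpBlob of size 1x3x227x227,
// inpSzBlob of size 1x3x224x224, startAxis = 2 and no explicit offsets,
// the automatic branch above yields a center crop with
// crop_ranges = { all, all, [1, 225), [1, 225) }.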
void forward(std::vector<Mat *> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
{
Mat &input = *inputs[0];
Mat &output = outputs[0];
input(&crop_ranges[0]).copyTo(output);
}
std::vector<Range> crop_ranges;
};
Ptr<CropLayer> CropLayer::create(const LayerParams& params)
{
return Ptr<CropLayer>(new CropLayerImpl(params));
}
}
}

View File

@ -0,0 +1,798 @@
/*M ///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "../precomp.hpp"
#include "layers_common.hpp"
#include <float.h>
#include <string>
#include <caffe.pb.h>
namespace cv
{
namespace dnn
{
namespace util
{
template <typename T>
std::string to_string(T value)
{
std::ostringstream stream;
stream << value;
return stream.str();
}
template <typename T>
void make_error(const std::string& message1, const T& message2)
{
std::string error(message1);
error += std::string(util::to_string<int>(message2));
CV_Error(Error::StsBadArg, error.c_str());
}
template <typename T>
bool SortScorePairDescend(const std::pair<float, T>& pair1,
const std::pair<float, T>& pair2)
{
return pair1.first > pair2.first;
}
}
class DetectionOutputLayerImpl : public DetectionOutputLayer
{
public:
unsigned _numClasses;
bool _shareLocation;
int _numLocClasses;
int _backgroundLabelId;
typedef caffe::PriorBoxParameter_CodeType CodeType;
CodeType _codeType;
bool _varianceEncodedInTarget;
int _keepTopK;
float _confidenceThreshold;
float _nmsThreshold;
int _topK;
enum { _numAxes = 4 };
static const std::string _layerName;
typedef std::map<int, std::vector<caffe::NormalizedBBox> > LabelBBox;
bool getParameterDict(const LayerParams &params,
const std::string &parameterName,
DictValue& result)
{
if (!params.has(parameterName))
{
return false;
}
result = params.get(parameterName);
return true;
}
template<typename T>
T getParameter(const LayerParams &params,
const std::string &parameterName,
const size_t &idx=0,
const bool required=true,
const T& defaultValue=T())
{
DictValue dictValue;
bool success = getParameterDict(params, parameterName, dictValue);
if(!success)
{
if(required)
{
std::string message = _layerName;
message += " layer parameter does not contain ";
message += parameterName;
message += " parameter.";
CV_Error(Error::StsBadArg, message);
}
else
{
return defaultValue;
}
}
return dictValue.get<T>(idx);
}
void getCodeType(const LayerParams &params)
{
String codeTypeString = params.get<String>("code_type").toLowerCase();
if (codeTypeString == "corner")
_codeType = caffe::PriorBoxParameter_CodeType_CORNER;
else if (codeTypeString == "center_size")
_codeType = caffe::PriorBoxParameter_CodeType_CENTER_SIZE;
else
_codeType = caffe::PriorBoxParameter_CodeType_CORNER;
}
DetectionOutputLayerImpl(const LayerParams &params)
{
_numClasses = getParameter<unsigned>(params, "num_classes");
_shareLocation = getParameter<bool>(params, "share_location");
_numLocClasses = _shareLocation ? 1 : _numClasses;
_backgroundLabelId = getParameter<int>(params, "background_label_id");
_varianceEncodedInTarget = getParameter<bool>(params, "variance_encoded_in_target", 0, false, false);
_keepTopK = getParameter<int>(params, "keep_top_k");
_confidenceThreshold = getParameter<float>(params, "confidence_threshold", 0, false, -FLT_MAX);
_topK = getParameter<int>(params, "top_k", 0, false, -1);
getCodeType(params);
// Parameters used in nms.
_nmsThreshold = getParameter<float>(params, "nms_threshold");
CV_Assert(_nmsThreshold > 0.);
setParamsFrom(params);
}
void checkInputs(const std::vector<Mat*> &inputs)
{
for (size_t i = 1; i < inputs.size(); i++)
{
CV_Assert(inputs[i]->size == inputs[0]->size);
}
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
std::vector<MatShape> &internals) const
{
CV_Assert(inputs.size() > 0);
CV_Assert(inputs[0][0] == inputs[1][0]);
int numPriors = inputs[2][2] / 4;
CV_Assert((numPriors * _numLocClasses * 4) == inputs[0][1]);
CV_Assert(int(numPriors * _numClasses) == inputs[1][1]);
// num() and channels() are 1.
// Since the number of bboxes to be kept is unknown before nms, we manually
// set it to (fake) 1.
// Each row is a 7 dimension std::vector, which stores
// [image_id, label, confidence, xmin, ymin, xmax, ymax]
outputs.resize(1, shape(1, 1, 1, 7));
return false;
}
void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
{
const float* locationData = inputs[0]->ptr<float>();
const float* confidenceData = inputs[1]->ptr<float>();
const float* priorData = inputs[2]->ptr<float>();
int num = inputs[0]->size[0];
int numPriors = inputs[2]->size[2] / 4;
// Retrieve all location predictions.
std::vector<LabelBBox> allLocationPredictions;
GetLocPredictions(locationData, num, numPriors, _numLocClasses,
_shareLocation, &allLocationPredictions);
// Retrieve all confidences.
std::vector<std::map<int, std::vector<float> > > allConfidenceScores;
GetConfidenceScores(confidenceData, num, numPriors, _numClasses,
&allConfidenceScores);
// Retrieve all prior bboxes. They are the same within a batch since we
// assume all images in a batch have the same dimensions.
std::vector<caffe::NormalizedBBox> priorBBoxes;
std::vector<std::vector<float> > priorVariances;
GetPriorBBoxes(priorData, numPriors, &priorBBoxes, &priorVariances);
const bool clip_bbox = false;
// Decode all loc predictions to bboxes.
std::vector<LabelBBox> allDecodedBBoxes;
DecodeBBoxesAll(allLocationPredictions, priorBBoxes, priorVariances, num,
_shareLocation, _numLocClasses, _backgroundLabelId,
_codeType, _varianceEncodedInTarget, clip_bbox, &allDecodedBBoxes);
int numKept = 0;
std::vector<std::map<int, std::vector<int> > > allIndices;
for (int i = 0; i < num; ++i)
{
const LabelBBox& decodeBBoxes = allDecodedBBoxes[i];
const std::map<int, std::vector<float> >& confidenceScores =
allConfidenceScores[i];
std::map<int, std::vector<int> > indices;
int numDetections = 0;
for (int c = 0; c < (int)_numClasses; ++c)
{
if (c == _backgroundLabelId)
{
// Ignore background class.
continue;
}
if (confidenceScores.find(c) == confidenceScores.end())
{
// Something bad happened if there are no predictions for the current label.
util::make_error<int>("Could not find confidence predictions for label ", c);
}
const std::vector<float>& scores = confidenceScores.find(c)->second;
int label = _shareLocation ? -1 : c;
if (decodeBBoxes.find(label) == decodeBBoxes.end())
{
// Something bad happened if there are no predictions for the current label.
util::make_error<int>("Could not find location predictions for label ", label);
continue;
}
const std::vector<caffe::NormalizedBBox>& bboxes =
decodeBBoxes.find(label)->second;
ApplyNMSFast(bboxes, scores, _confidenceThreshold, _nmsThreshold, 1.0,
_topK, &(indices[c]));
numDetections += indices[c].size();
}
if (_keepTopK > -1 && numDetections > _keepTopK)
{
std::vector<std::pair<float, std::pair<int, int> > > scoreIndexPairs;
for (std::map<int, std::vector<int> >::iterator it = indices.begin();
it != indices.end(); ++it)
{
int label = it->first;
const std::vector<int>& labelIndices = it->second;
if (confidenceScores.find(label) == confidenceScores.end())
{
// Something bad happened for the current label.
util::make_error<int>("Could not find location predictions for label ", label);
continue;
}
const std::vector<float>& scores = confidenceScores.find(label)->second;
for (size_t j = 0; j < labelIndices.size(); ++j)
{
size_t idx = labelIndices[j];
CV_Assert(idx < scores.size());
scoreIndexPairs.push_back(
std::make_pair(scores[idx], std::make_pair(label, idx)));
}
}
// Keep at most _keepTopK results per image.
std::sort(scoreIndexPairs.begin(), scoreIndexPairs.end(),
util::SortScorePairDescend<std::pair<int, int> >);
scoreIndexPairs.resize(_keepTopK);
// Store the new indices.
std::map<int, std::vector<int> > newIndices;
for (size_t j = 0; j < scoreIndexPairs.size(); ++j)
{
int label = scoreIndexPairs[j].second.first;
int idx = scoreIndexPairs[j].second.second;
newIndices[label].push_back(idx);
}
allIndices.push_back(newIndices);
numKept += _keepTopK;
}
else
{
allIndices.push_back(indices);
numKept += numDetections;
}
}
if (numKept == 0)
{
CV_ErrorNoReturn(Error::StsError, "Couldn't find any detections");
return;
}
int outputShape[] = {1, 1, numKept, 7};
outputs[0].create(4, outputShape, CV_32F);
float* outputsData = outputs[0].ptr<float>();
int count = 0;
for (int i = 0; i < num; ++i)
{
const std::map<int, std::vector<float> >& confidenceScores =
allConfidenceScores[i];
const LabelBBox& decodeBBoxes = allDecodedBBoxes[i];
for (std::map<int, std::vector<int> >::iterator it = allIndices[i].begin();
it != allIndices[i].end(); ++it)
{
int label = it->first;
if (confidenceScores.find(label) == confidenceScores.end())
{
// Something bad happened if there are no predictions for the current label.
util::make_error<int>("Could not find confidence predictions for label ", label);
continue;
}
const std::vector<float>& scores = confidenceScores.find(label)->second;
int locLabel = _shareLocation ? -1 : label;
if (decodeBBoxes.find(locLabel) == decodeBBoxes.end())
{
// Something bad happened if there are no predictions for the current label.
util::make_error<int>("Could not find location predictions for label ", locLabel);
continue;
}
const std::vector<caffe::NormalizedBBox>& bboxes =
decodeBBoxes.find(locLabel)->second;
std::vector<int>& indices = it->second;
for (size_t j = 0; j < indices.size(); ++j)
{
int idx = indices[j];
outputsData[count * 7] = i;
outputsData[count * 7 + 1] = label;
outputsData[count * 7 + 2] = scores[idx];
caffe::NormalizedBBox clipBBox = bboxes[idx];
outputsData[count * 7 + 3] = clipBBox.xmin();
outputsData[count * 7 + 4] = clipBBox.ymin();
outputsData[count * 7 + 5] = clipBBox.xmax();
outputsData[count * 7 + 6] = clipBBox.ymax();
++count;
}
}
}
}
// Compute bbox size.
float BBoxSize(const caffe::NormalizedBBox& bbox,
const bool normalized=true)
{
if (bbox.xmax() < bbox.xmin() || bbox.ymax() < bbox.ymin())
{
// If bbox is invalid (e.g. xmax < xmin or ymax < ymin), return 0.
return 0;
}
else
{
if (bbox.has_size())
{
return bbox.size();
}
else
{
float width = bbox.xmax() - bbox.xmin();
float height = bbox.ymax() - bbox.ymin();
if (normalized)
{
return width * height;
}
else
{
// If bbox is not within range [0, 1].
return (width + 1) * (height + 1);
}
}
}
}
// Clip the caffe::NormalizedBBox such that the range for each corner is [0, 1].
void ClipBBox(const caffe::NormalizedBBox& bbox,
caffe::NormalizedBBox* clipBBox)
{
clipBBox->set_xmin(std::max(std::min(bbox.xmin(), 1.f), 0.f));
clipBBox->set_ymin(std::max(std::min(bbox.ymin(), 1.f), 0.f));
clipBBox->set_xmax(std::max(std::min(bbox.xmax(), 1.f), 0.f));
clipBBox->set_ymax(std::max(std::min(bbox.ymax(), 1.f), 0.f));
clipBBox->clear_size();
clipBBox->set_size(BBoxSize(*clipBBox));
clipBBox->set_difficult(bbox.difficult());
}
// Decode a bbox according to a prior bbox.
void DecodeBBox(
const caffe::NormalizedBBox& prior_bbox, const std::vector<float>& prior_variance,
const CodeType code_type, const bool variance_encoded_in_target,
const bool clip_bbox, const caffe::NormalizedBBox& bbox,
caffe::NormalizedBBox* decode_bbox) {
if (code_type == caffe::PriorBoxParameter_CodeType_CORNER) {
if (variance_encoded_in_target) {
// variance is encoded in the target, so we simply need to add the offset
// predictions.
decode_bbox->set_xmin(prior_bbox.xmin() + bbox.xmin());
decode_bbox->set_ymin(prior_bbox.ymin() + bbox.ymin());
decode_bbox->set_xmax(prior_bbox.xmax() + bbox.xmax());
decode_bbox->set_ymax(prior_bbox.ymax() + bbox.ymax());
} else {
// variance is encoded in the bbox, so we need to scale the offset accordingly.
decode_bbox->set_xmin(
prior_bbox.xmin() + prior_variance[0] * bbox.xmin());
decode_bbox->set_ymin(
prior_bbox.ymin() + prior_variance[1] * bbox.ymin());
decode_bbox->set_xmax(
prior_bbox.xmax() + prior_variance[2] * bbox.xmax());
decode_bbox->set_ymax(
prior_bbox.ymax() + prior_variance[3] * bbox.ymax());
}
} else if (code_type == caffe::PriorBoxParameter_CodeType_CENTER_SIZE) {
float prior_width = prior_bbox.xmax() - prior_bbox.xmin();
CV_Assert(prior_width > 0);
float prior_height = prior_bbox.ymax() - prior_bbox.ymin();
CV_Assert(prior_height > 0);
float prior_center_x = (prior_bbox.xmin() + prior_bbox.xmax()) / 2.;
float prior_center_y = (prior_bbox.ymin() + prior_bbox.ymax()) / 2.;
float decode_bbox_center_x, decode_bbox_center_y;
float decode_bbox_width, decode_bbox_height;
if (variance_encoded_in_target) {
// variance is encoded in the target, so we simply need to restore the offset
// predictions.
decode_bbox_center_x = bbox.xmin() * prior_width + prior_center_x;
decode_bbox_center_y = bbox.ymin() * prior_height + prior_center_y;
decode_bbox_width = exp(bbox.xmax()) * prior_width;
decode_bbox_height = exp(bbox.ymax()) * prior_height;
} else {
// variance is encoded in the bbox, so we need to scale the offset accordingly.
decode_bbox_center_x =
prior_variance[0] * bbox.xmin() * prior_width + prior_center_x;
decode_bbox_center_y =
prior_variance[1] * bbox.ymin() * prior_height + prior_center_y;
decode_bbox_width =
exp(prior_variance[2] * bbox.xmax()) * prior_width;
decode_bbox_height =
exp(prior_variance[3] * bbox.ymax()) * prior_height;
}
decode_bbox->set_xmin(decode_bbox_center_x - decode_bbox_width / 2.);
decode_bbox->set_ymin(decode_bbox_center_y - decode_bbox_height / 2.);
decode_bbox->set_xmax(decode_bbox_center_x + decode_bbox_width / 2.);
decode_bbox->set_ymax(decode_bbox_center_y + decode_bbox_height / 2.);
} else {
CV_Error(Error::StsBadArg, "Unknown LocLossType.");
}
float bbox_size = BBoxSize(*decode_bbox);
decode_bbox->set_size(bbox_size);
if (clip_bbox) {
ClipBBox(*decode_bbox, decode_bbox);
}
}
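// Summary of the CENTER_SIZE branch above (illustrative notation): with
// prior center (pcx, pcy), prior size (pw, ph), variances v0..v3 and
// encoded offsets (tx, ty, tw, th) read from bbox,
// cx = v0 * tx * pw + pcx, cy = v1 * ty * ph + pcy,
// w = exp(v2 * tw) * pw, h = exp(v3 * th) * ph,
// and the decoded corners are (cx - w/2, cy - h/2, cx + w/2, cy + h/2).
// When variance_encoded_in_target is set, the v_i factors are omitted.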
// Decode a set of bboxes according to a set of prior bboxes.
void DecodeBBoxes(
const std::vector<caffe::NormalizedBBox>& prior_bboxes,
const std::vector<std::vector<float> >& prior_variances,
const CodeType code_type, const bool variance_encoded_in_target,
const bool clip_bbox, const std::vector<caffe::NormalizedBBox>& bboxes,
std::vector<caffe::NormalizedBBox>* decode_bboxes) {
CV_Assert(prior_bboxes.size() == prior_variances.size());
CV_Assert(prior_bboxes.size() == bboxes.size());
int num_bboxes = prior_bboxes.size();
if (num_bboxes >= 1) {
CV_Assert(prior_variances[0].size() == 4);
}
decode_bboxes->clear();
for (int i = 0; i < num_bboxes; ++i) {
caffe::NormalizedBBox decode_bbox;
DecodeBBox(prior_bboxes[i], prior_variances[i], code_type,
variance_encoded_in_target, clip_bbox, bboxes[i], &decode_bbox);
decode_bboxes->push_back(decode_bbox);
}
}
// Decode all bboxes in a batch.
void DecodeBBoxesAll(const std::vector<LabelBBox>& all_loc_preds,
const std::vector<caffe::NormalizedBBox>& prior_bboxes,
const std::vector<std::vector<float> >& prior_variances,
const int num, const bool share_location,
const int num_loc_classes, const int background_label_id,
const CodeType code_type, const bool variance_encoded_in_target,
const bool clip, std::vector<LabelBBox>* all_decode_bboxes) {
CV_Assert(all_loc_preds.size() == num);
all_decode_bboxes->clear();
all_decode_bboxes->resize(num);
for (int i = 0; i < num; ++i) {
// Decode predictions into bboxes.
LabelBBox& decode_bboxes = (*all_decode_bboxes)[i];
for (int c = 0; c < num_loc_classes; ++c) {
int label = share_location ? -1 : c;
if (label == background_label_id) {
// Ignore background class.
continue;
}
if (all_loc_preds[i].find(label) == all_loc_preds[i].end()) {
// Something bad happened if there are no predictions for the current label.
util::make_error<int>("Could not find location predictions for label ", label);
}
const std::vector<caffe::NormalizedBBox>& label_loc_preds =
all_loc_preds[i].find(label)->second;
DecodeBBoxes(prior_bboxes, prior_variances,
code_type, variance_encoded_in_target, clip,
label_loc_preds, &(decode_bboxes[label]));
}
}
}
// Get prior bounding boxes from prior_data.
// prior_data: 1 x 2 x num_priors * 4 x 1 blob.
// num_priors: number of priors.
// prior_bboxes: stores all the prior bboxes in the format of caffe::NormalizedBBox.
// prior_variances: stores all the variances needed by prior bboxes.
void GetPriorBBoxes(const float* priorData, const int& numPriors,
std::vector<caffe::NormalizedBBox>* priorBBoxes,
std::vector<std::vector<float> >* priorVariances)
{
priorBBoxes->clear();
priorVariances->clear();
for (int i = 0; i < numPriors; ++i)
{
int startIdx = i * 4;
caffe::NormalizedBBox bbox;
bbox.set_xmin(priorData[startIdx]);
bbox.set_ymin(priorData[startIdx + 1]);
bbox.set_xmax(priorData[startIdx + 2]);
bbox.set_ymax(priorData[startIdx + 3]);
float bboxSize = BBoxSize(bbox);
bbox.set_size(bboxSize);
priorBBoxes->push_back(bbox);
}
for (int i = 0; i < numPriors; ++i)
{
int startIdx = (numPriors + i) * 4;
std::vector<float> var;
for (int j = 0; j < 4; ++j)
{
var.push_back(priorData[startIdx + j]);
}
priorVariances->push_back(var);
}
}
// Scale the caffe::NormalizedBBox w.r.t. height and width.
void ScaleBBox(const caffe::NormalizedBBox& bbox,
const int height, const int width,
caffe::NormalizedBBox* scaleBBox)
{
scaleBBox->set_xmin(bbox.xmin() * width);
scaleBBox->set_ymin(bbox.ymin() * height);
scaleBBox->set_xmax(bbox.xmax() * width);
scaleBBox->set_ymax(bbox.ymax() * height);
scaleBBox->clear_size();
bool normalized = !(width > 1 || height > 1);
scaleBBox->set_size(BBoxSize(*scaleBBox, normalized));
scaleBBox->set_difficult(bbox.difficult());
}
// Get location predictions from loc_data.
// loc_data: num x num_preds_per_class * num_loc_classes * 4 blob.
// num: the number of images.
// num_preds_per_class: number of predictions per class.
// num_loc_classes: number of location classes. It is 1 if share_location is
// true; and is equal to number of classes needed to predict otherwise.
// share_location: if true, all classes share the same location prediction.
// loc_preds: stores the location prediction, where each item contains
// location prediction for an image.
void GetLocPredictions(const float* locData, const int num,
const int numPredsPerClass, const int numLocClasses,
const bool shareLocation, std::vector<LabelBBox>* locPreds)
{
locPreds->clear();
if (shareLocation)
{
CV_Assert(numLocClasses == 1);
}
locPreds->resize(num);
for (int i = 0; i < num; ++i)
{
LabelBBox& labelBBox = (*locPreds)[i];
for (int p = 0; p < numPredsPerClass; ++p)
{
int startIdx = p * numLocClasses * 4;
for (int c = 0; c < numLocClasses; ++c)
{
int label = shareLocation ? -1 : c;
if (labelBBox.find(label) == labelBBox.end())
{
labelBBox[label].resize(numPredsPerClass);
}
labelBBox[label][p].set_xmin(locData[startIdx + c * 4]);
labelBBox[label][p].set_ymin(locData[startIdx + c * 4 + 1]);
labelBBox[label][p].set_xmax(locData[startIdx + c * 4 + 2]);
labelBBox[label][p].set_ymax(locData[startIdx + c * 4 + 3]);
}
}
locData += numPredsPerClass * numLocClasses * 4;
}
}
// Get confidence predictions from conf_data.
// conf_data: num x num_preds_per_class * num_classes blob.
// num: the number of images.
// num_preds_per_class: number of predictions per class.
// num_classes: number of classes.
// conf_preds: stores the confidence prediction, where each item contains
// confidence prediction for an image.
void GetConfidenceScores(const float* confData, const int num,
const int numPredsPerClass, const int numClasses,
std::vector<std::map<int, std::vector<float> > >* confPreds)
{
confPreds->clear();
confPreds->resize(num);
for (int i = 0; i < num; ++i)
{
std::map<int, std::vector<float> >& labelScores = (*confPreds)[i];
for (int p = 0; p < numPredsPerClass; ++p)
{
int startIdx = p * numClasses;
for (int c = 0; c < numClasses; ++c)
{
labelScores[c].push_back(confData[startIdx + c]);
}
}
confData += numPredsPerClass * numClasses;
}
}
// Do non maximum suppression given bboxes and scores.
// Inspired by Piotr Dollar's NMS implementation in EdgeBox.
// https://goo.gl/jV3JYS
// bboxes: a set of bounding boxes.
// scores: a set of corresponding confidences.
// score_threshold: a threshold used to filter detection results.
// nms_threshold: a threshold used in non maximum suppression.
// top_k: if not -1, keep at most top_k picked indices.
// indices: the kept indices of bboxes after nms.
void ApplyNMSFast(const std::vector<caffe::NormalizedBBox>& bboxes,
const std::vector<float>& scores, const float score_threshold,
const float nms_threshold, const float eta, const int top_k,
std::vector<int>* indices) {
// Sanity check.
CV_Assert(bboxes.size() == scores.size());
// Get top_k scores (with corresponding indices).
std::vector<std::pair<float, int> > score_index_vec;
GetMaxScoreIndex(scores, score_threshold, top_k, &score_index_vec);
// Do nms.
float adaptive_threshold = nms_threshold;
indices->clear();
while (score_index_vec.size() != 0) {
const int idx = score_index_vec.front().second;
bool keep = true;
for (int k = 0; k < indices->size(); ++k) {
if (keep) {
const int kept_idx = (*indices)[k];
float overlap = JaccardOverlap(bboxes[idx], bboxes[kept_idx]);
keep = overlap <= adaptive_threshold;
} else {
break;
}
}
if (keep) {
indices->push_back(idx);
}
score_index_vec.erase(score_index_vec.begin());
if (keep && eta < 1 && adaptive_threshold > 0.5) {
adaptive_threshold *= eta;
}
}
}
// Get max scores with corresponding indices.
// scores: a set of scores.
// threshold: only consider scores higher than the threshold.
// top_k: if -1, keep all; otherwise, keep at most top_k.
// score_index_vec: store the sorted (score, index) pair.
void GetMaxScoreIndex(const std::vector<float>& scores, const float threshold,const int top_k,
std::vector<std::pair<float, int> >* score_index_vec)
{
// Generate index score pairs.
for (size_t i = 0; i < scores.size(); ++i)
{
if (scores[i] > threshold)
{
score_index_vec->push_back(std::make_pair(scores[i], i));
}
}
// Sort the score pair according to the scores in descending order
std::stable_sort(score_index_vec->begin(), score_index_vec->end(),
util::SortScorePairDescend<int>);
// Keep top_k scores if needed.
if (top_k > -1 && top_k < (int)score_index_vec->size())
{
score_index_vec->resize(top_k);
}
}
// Compute the intersection between two bboxes.
void IntersectBBox(const caffe::NormalizedBBox& bbox1,
const caffe::NormalizedBBox& bbox2,
caffe::NormalizedBBox* intersect_bbox) {
if (bbox2.xmin() > bbox1.xmax() || bbox2.xmax() < bbox1.xmin() ||
bbox2.ymin() > bbox1.ymax() || bbox2.ymax() < bbox1.ymin())
{
// Return [0, 0, 0, 0] if there is no intersection.
intersect_bbox->set_xmin(0);
intersect_bbox->set_ymin(0);
intersect_bbox->set_xmax(0);
intersect_bbox->set_ymax(0);
}
else
{
intersect_bbox->set_xmin(std::max(bbox1.xmin(), bbox2.xmin()));
intersect_bbox->set_ymin(std::max(bbox1.ymin(), bbox2.ymin()));
intersect_bbox->set_xmax(std::min(bbox1.xmax(), bbox2.xmax()));
intersect_bbox->set_ymax(std::min(bbox1.ymax(), bbox2.ymax()));
}
}
// Compute the Jaccard (intersection over union, IoU) overlap between two bboxes.
float JaccardOverlap(const caffe::NormalizedBBox& bbox1,
const caffe::NormalizedBBox& bbox2,
const bool normalized=true)
{
caffe::NormalizedBBox intersect_bbox;
IntersectBBox(bbox1, bbox2, &intersect_bbox);
float intersect_width, intersect_height;
if (normalized)
{
intersect_width = intersect_bbox.xmax() - intersect_bbox.xmin();
intersect_height = intersect_bbox.ymax() - intersect_bbox.ymin();
}
else
{
intersect_width = intersect_bbox.xmax() - intersect_bbox.xmin() + 1;
intersect_height = intersect_bbox.ymax() - intersect_bbox.ymin() + 1;
}
if (intersect_width > 0 && intersect_height > 0)
{
float intersect_size = intersect_width * intersect_height;
float bbox1_size = BBoxSize(bbox1);
float bbox2_size = BBoxSize(bbox2);
return intersect_size / (bbox1_size + bbox2_size - intersect_size);
}
else
{
return 0.;
}
}
};
const std::string DetectionOutputLayerImpl::_layerName = std::string("DetectionOutput");
Ptr<DetectionOutputLayer> DetectionOutputLayer::create(const LayerParams &params)
{
return Ptr<DetectionOutputLayer>(new DetectionOutputLayerImpl(params));
}
}
}
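For reference, a hedged sketch (not part of the original sources) of consuming the 1 x 1 x N x 7 blob produced by forward() above. The Mat named detections is an assumed handle to the layer output; the field order follows the comment in getMemoryShapes:

// View the continuous output data as an N x 7 matrix.
Mat dets(detections.size[2], 7, CV_32F, detections.ptr<float>());
for (int r = 0; r < dets.rows; ++r)
{
const float* d = dets.ptr<float>(r);
int imageId = (int)d[0];
int label = (int)d[1];
float confidence = d[2];
// Normalized box corners: xmin, ymin, xmax, ymax.
Rect2f box(d[3], d[4], d[5] - d[3], d[6] - d[4]);
}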

View File

@ -0,0 +1,502 @@
#include "../precomp.hpp"
#include "op_halide.hpp"
#include "opencv2/imgproc.hpp"
#include <opencv2/dnn/shape_utils.hpp>
namespace cv
{
namespace dnn
{
using std::abs;
using std::exp;
using std::tanh;
using std::pow;
template<typename Func>
class ElementWiseLayer : public Func::Layer
{
public:
class PBody : public cv::ParallelLoopBody
{
public:
const Func* func_;
const Mat* src_;
Mat* dst_;
int nstripes_;
PBody(const Func &func, const Mat &src, Mat& dst, int nstripes)
{
func_ = &func;
src_ = &src;
dst_ = &dst;
nstripes_ = nstripes;
}
void operator()(const Range &r) const
{
int nstripes = nstripes_, nsamples, outCn;
size_t planeSize;
if( src_->dims == 4 )
{
nsamples = src_->size[0];
outCn = src_->size[1];
planeSize = (size_t)src_->size[2]*src_->size[3];
}
else
{
nsamples = outCn = 1;
planeSize = (size_t)src_->total();
}
size_t stripeSize = (planeSize + nstripes - 1)/nstripes;
size_t stripeStart = r.start*stripeSize;
size_t stripeEnd = std::min(r.end*stripeSize, planeSize);
for( int i = 0; i < nsamples; i++ )
{
const float* srcptr = src_->ptr<float>(i) + stripeStart;
float* dstptr = dst_->ptr<float>(i) + stripeStart;
func_->apply(srcptr, dstptr, (int)(stripeEnd - stripeStart), planeSize, 0, outCn);
}
}
};
ElementWiseLayer(const Func &f=Func()) { func = f; }
virtual bool supportBackend(int backendId)
{
return backendId == DNN_BACKEND_DEFAULT ||
(backendId == DNN_BACKEND_HALIDE && haveHalide());
}
virtual Ptr<BackendNode> tryAttach(const Ptr<BackendNode>& node)
{
switch (node->backendId)
{
case DNN_BACKEND_HALIDE:
{
#ifdef HAVE_HALIDE
auto base = node.dynamicCast<HalideBackendNode>();
Halide::Func& input = base->funcs.back();
Halide::Var x("x"), y("y"), c("c"), n("n");
Halide::Func top = (this->name.empty() ? Halide::Func() : Halide::Func(this->name));
func.attachHalide(input(x, y, c, n), top);
return Ptr<BackendNode>(new HalideBackendNode(base, top));
#endif // HAVE_HALIDE
break;
}
}
return Ptr<BackendNode>();
}
virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &inputs)
{
#ifdef HAVE_HALIDE
Halide::Buffer<float> input = halideBuffer(inputs[0]);
Halide::Var x("x"), y("y"), c("c"), n("n");
Halide::Func top = (this->name.empty() ? Halide::Func() : Halide::Func(this->name));
func.attachHalide(input(x, y, c, n), top);
return Ptr<BackendNode>(new HalideBackendNode(top));
#endif // HAVE_HALIDE
return Ptr<BackendNode>();
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
std::vector<MatShape> &internals) const
{
Layer::getMemoryShapes(inputs, requiredOutputs, outputs, internals);
return true;
}
void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
{
for (size_t i = 0; i < inputs.size(); i++)
{
const Mat &src = *inputs[i];
Mat &dst = outputs[i];
CV_Assert(src.size == dst.size && src.type() == dst.type() &&
src.isContinuous() && dst.isContinuous() && src.type() == CV_32F);
const int nstripes = getNumThreads();
PBody body(func, src, dst, nstripes);
parallel_for_(Range(0, nstripes), body, nstripes);
}
}
void forwardSlice(const float* src, float* dst, int len, size_t planeSize, int cn0, int cn1) const
{
func.apply(src, dst, len, planeSize, cn0, cn1);
}
virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
const std::vector<MatShape> &outputs) const
{
int64 flops = 0;
for (size_t i = 0; i < outputs.size(); i++)
{
flops += total(outputs[i]) * func.getFLOPSPerElement();
}
return flops;
}
Func func;
bool run_parallel;
};
struct ReLUFunctor
{
typedef ReLULayer Layer;
float slope;
explicit ReLUFunctor(float slope_=1.f) : slope(slope_) {}
void apply(const float* srcptr, float* dstptr, int len, size_t planeSize, int cn0, int cn1) const
{
float s = slope;
for( int cn = cn0; cn < cn1; cn++, srcptr += planeSize, dstptr += planeSize )
{
int i = 0;
#if CV_SIMD128
v_float32x4 s4 = v_setall_f32(s), z = v_setzero_f32();
for( ; i <= len - 16; i += 16 )
{
v_float32x4 x0 = v_load(srcptr + i);
v_float32x4 x1 = v_load(srcptr + i + 4);
v_float32x4 x2 = v_load(srcptr + i + 8);
v_float32x4 x3 = v_load(srcptr + i + 12);
x0 = v_select(x0 >= z, x0, x0*s4);
x1 = v_select(x1 >= z, x1, x1*s4);
x2 = v_select(x2 >= z, x2, x2*s4);
x3 = v_select(x3 >= z, x3, x3*s4);
v_store(dstptr + i, x0);
v_store(dstptr + i + 4, x1);
v_store(dstptr + i + 8, x2);
v_store(dstptr + i + 12, x3);
}
#endif
for( ; i < len; i++ )
{
float x = srcptr[i];
dstptr[i] = x >= 0.f ? x : s*x;
}
}
}
#ifdef HAVE_HALIDE
void attachHalide(const Halide::Expr& input, Halide::Func& top)
{
Halide::Var x("x"), y("y"), c("c"), n("n");
if (slope)
{
top(x, y, c, n) = select(input >= 0.0f, input, slope);
}
else
{
top(x, y, c, n) = max(input, 0.0f);
}
}
#endif // HAVE_HALIDE
int64 getFLOPSPerElement() const { return 1; }
};
struct TanHFunctor
{
typedef TanHLayer Layer;
void apply(const float* srcptr, float* dstptr, int len, size_t planeSize, int cn0, int cn1) const
{
for( int cn = cn0; cn < cn1; cn++, srcptr += planeSize, dstptr += planeSize )
{
for( int i = 0; i < len; i++ )
{
float x = srcptr[i];
dstptr[i] = tanh(x);
}
}
}
#ifdef HAVE_HALIDE
void attachHalide(const Halide::Expr& input, Halide::Func& top)
{
Halide::Var x("x"), y("y"), c("c"), n("n");
top(x, y, c, n) = tanh(input);
}
#endif // HAVE_HALIDE
int64 getFLOPSPerElement() const { return 1; }
};
struct SigmoidFunctor
{
typedef SigmoidLayer Layer;
void apply(const float* srcptr, float* dstptr, int len, size_t planeSize, int cn0, int cn1) const
{
for( int cn = cn0; cn < cn1; cn++, srcptr += planeSize, dstptr += planeSize )
{
for( int i = 0; i < len; i++ )
{
float x = srcptr[i];
dstptr[i] = 1.f/(1.f + exp(-x));
}
}
}
#ifdef HAVE_HALIDE
void attachHalide(const Halide::Expr& input, Halide::Func& top)
{
Halide::Var x("x"), y("y"), c("c"), n("n");
top(x, y, c, n) = 1.0f / (1.0f + exp(-input));
}
#endif // HAVE_HALIDE
int64 getFLOPSPerElement() const { return 3; }
};
struct AbsValFunctor
{
typedef AbsLayer Layer;
void apply(const float* srcptr, float* dstptr, int len, size_t planeSize, int cn0, int cn1) const
{
for( int cn = cn0; cn < cn1; cn++, srcptr += planeSize, dstptr += planeSize )
{
for( int i = 0; i < len; i++ )
{
float x = srcptr[i];
dstptr[i] = abs(x);
}
}
}
#ifdef HAVE_HALIDE
void attachHalide(const Halide::Expr& input, Halide::Func& top)
{
Halide::Var x("x"), y("y"), c("c"), n("n");
top(x, y, c, n) = abs(input);
}
#endif // HAVE_HALIDE
int64 getFLOPSPerElement() const { return 1; }
};
struct BNLLFunctor
{
typedef BNLLLayer Layer;
void apply(const float* srcptr, float* dstptr, int len, size_t planeSize, int cn0, int cn1) const
{
for( int cn = cn0; cn < cn1; cn++, srcptr += planeSize, dstptr += planeSize )
{
for( int i = 0; i < len; i++ )
{
float x = srcptr[i];
dstptr[i] = log(1.f + exp(-abs(x)));
}
}
}
#ifdef HAVE_HALIDE
void attachHalide(const Halide::Expr& input, Halide::Func& top)
{
Halide::Var x("x"), y("y"), c("c"), n("n");
top(x, y, c, n) = log(1.0f + exp(-abs(input)));
}
#endif // HAVE_HALIDE
int64 getFLOPSPerElement() const { return 5; }
};
struct PowerFunctor
{
typedef PowerLayer Layer;
float power;
float scale;
float shift;
explicit PowerFunctor(float power_ = 1.f, float scale_ = 1.f, float shift_ = 0.f)
: power(power_), scale(scale_), shift(shift_) {}
void apply(const float* srcptr, float* dstptr, int len, size_t planeSize, int cn0, int cn1) const
{
float a = scale, b = shift, p = power;
if( p == 1.f )
{
for( int cn = cn0; cn < cn1; cn++, srcptr += planeSize, dstptr += planeSize )
{
for( int i = 0; i < len; i++ )
{
float x = srcptr[i];
dstptr[i] = a*x + b;
}
}
}
else
{
for( int cn = cn0; cn < cn1; cn++, srcptr += planeSize, dstptr += planeSize )
{
for( int i = 0; i < len; i++ )
{
float x = srcptr[i];
dstptr[i] = pow(a*x + b, p);
}
}
}
}
#ifdef HAVE_HALIDE
void attachHalide(const Halide::Expr& input, Halide::Func& top)
{
Halide::Var x("x"), y("y"), c("c"), n("n");
Halide::Expr topExpr = (scale == 1.0f ? input : input * scale);
if (shift)
{
topExpr += shift;
}
if (power != 1.0f)
{
topExpr = pow(topExpr, power);
}
top(x, y, c, n) = topExpr;
}
#endif // HAVE_HALIDE
int64 getFLOPSPerElement() const { return power == 1 ? 2 : 10; }
};
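// Illustrative: with power = 2, scale = 1 and shift = 1 the layer computes
// dst = (src + 1)^2 per element; with power == 1 it reduces to the affine
// fast path a*x + b above.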
struct ChannelsPReLUFunctor
{
typedef ChannelsPReLULayer Layer;
Mat scale;
explicit ChannelsPReLUFunctor(const Mat& scale_=Mat()) : scale(scale_)
{
}
void apply(const float* srcptr, float* dstptr, int len, size_t planeSize, int cn0, int cn1) const
{
CV_Assert(scale.isContinuous() && scale.type() == CV_32F);
const float* scaleptr = scale.ptr<float>();
CV_Assert( 0 <= cn0 && cn0 < cn1 && cn1 <= (int)scale.total() );
for( int cn = cn0; cn < cn1; cn++, srcptr += planeSize, dstptr += planeSize )
{
float s = scaleptr[cn];
int i = 0;
#if CV_SIMD128
v_float32x4 s4 = v_setall_f32(s), z = v_setzero_f32();
for( ; i <= len - 16; i += 16 )
{
v_float32x4 x0 = v_load(srcptr + i);
v_float32x4 x1 = v_load(srcptr + i + 4);
v_float32x4 x2 = v_load(srcptr + i + 8);
v_float32x4 x3 = v_load(srcptr + i + 12);
x0 = v_select(x0 >= z, x0, x0*s4);
x1 = v_select(x1 >= z, x1, x1*s4);
x2 = v_select(x2 >= z, x2, x2*s4);
x3 = v_select(x3 >= z, x3, x3*s4);
v_store(dstptr + i, x0);
v_store(dstptr + i + 4, x1);
v_store(dstptr + i + 8, x2);
v_store(dstptr + i + 12, x3);
}
#endif
for( ; i < len; i++ )
{
float x = srcptr[i];
dstptr[i] = x >= 0.f ? x : s*x;
}
}
}
#ifdef HAVE_HALIDE
void attachHalide(const Halide::Expr& input, Halide::Func& top)
{
Halide::Var x("x"), y("y"), c("c"), n("n");
auto weights = wrapToHalideBuffer(scale, {(int)scale.total()});
top(x, y, c, n) = select(input >= 0.0f, input, weights(c) * input);
}
#endif // HAVE_HALIDE
int64 getFLOPSPerElement() const { return 1; }
};
#define ACTIVATION_CREATOR_FOR(_Layer, _Functor, ...) \
Ptr<_Layer> _Layer::create() { \
    return Ptr<_Layer>( new ElementWiseLayer<_Functor>(_Functor()) ); }
Ptr<ReLULayer> ReLULayer::create(const LayerParams& params)
{
float negativeSlope = params.get<float>("negative_slope", 0.f);
Ptr<ReLULayer> l(new ElementWiseLayer<ReLUFunctor>(ReLUFunctor(negativeSlope)));
l->setParamsFrom(params);
l->negativeSlope = negativeSlope;
return l;
}
Ptr<TanHLayer> TanHLayer::create(const LayerParams& params)
{
Ptr<TanHLayer> l(new ElementWiseLayer<TanHFunctor>());
l->setParamsFrom(params);
return l;
}
Ptr<SigmoidLayer> SigmoidLayer::create(const LayerParams& params)
{
Ptr<SigmoidLayer> l(new ElementWiseLayer<SigmoidFunctor>());
l->setParamsFrom(params);
return l;
}
Ptr<AbsLayer> AbsLayer::create(const LayerParams& params)
{
Ptr<AbsLayer> l(new ElementWiseLayer<AbsValFunctor>());
l->setParamsFrom(params);
return l;
}
Ptr<BNLLLayer> BNLLLayer::create(const LayerParams& params)
{
Ptr<BNLLLayer> l(new ElementWiseLayer<BNLLFunctor>());
l->setParamsFrom(params);
return l;
}
Ptr<PowerLayer> PowerLayer::create(const LayerParams& params)
{
float power = params.get<float>("power", 1.0f);
float scale = params.get<float>("scale", 1.0f);
float shift = params.get<float>("shift", 0.0f);
Ptr<PowerLayer> l(new ElementWiseLayer<PowerFunctor>(PowerFunctor(power, scale, shift)));
l->setParamsFrom(params);
l->power = power;
l->scale = scale;
l->shift = shift;
return l;
}
Ptr<ChannelsPReLULayer> ChannelsPReLULayer::create(const LayerParams& params)
{
Ptr<ChannelsPReLULayer> l(new ElementWiseLayer<ChannelsPReLUFunctor>(ChannelsPReLUFunctor(params.blobs[0])));
l->setParamsFrom(params);
return l;
}
}
}

View File

@ -0,0 +1,219 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "../precomp.hpp"
#include "layers_common.hpp"
#include "op_halide.hpp"
namespace cv
{
namespace dnn
{
class EltwiseLayerImpl : public EltwiseLayer
{
public:
EltwiseOp op;
std::vector<int> coeffs;
EltwiseLayerImpl(const LayerParams& params)
{
setParamsFrom(params);
op = EltwiseLayer::SUM;
if (params.has("operation"))
{
String operation = params.get<String>("operation").toLowerCase();
if (operation == "prod")
op = EltwiseLayer::PROD;
else if (operation == "sum")
op = EltwiseLayer::SUM;
else if (operation == "max")
op = EltwiseLayer::MAX;
else
CV_Error(cv::Error::StsBadArg, "Unknown operaticon type \"" + operation + "\"");
}
if (params.has("coeff"))
{
DictValue paramCoeff = params.get("coeff");
int i, n = paramCoeff.size();
coeffs.resize(n);
for (i = 0; i < n; i++)
{
coeffs[i] = paramCoeff.get<int>(i);
}
}
}
virtual bool supportBackend(int backendId)
{
        return backendId == DNN_BACKEND_DEFAULT ||
               (backendId == DNN_BACKEND_HALIDE && haveHalide());
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
std::vector<MatShape> &internals) const
{
CV_Assert(inputs.size() >= 2);
CV_Assert(coeffs.size() == 0 || coeffs.size() == inputs.size());
CV_Assert(op == SUM || coeffs.size() == 0);
for (int i = 1; i < inputs.size(); i++)
{
CV_Assert(inputs[0] == inputs[i]);
}
outputs.assign(1, inputs[0]);
return false;
}
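    // SUM with coefficients computes output = sum_i coeffs[i]*input_i; without
    // coefficients it reduces to a plain elementwise sum. Note that the
    // coefficients are parsed into a vector<int> above, so fractional Caffe
    // 'coeff' values would be truncated.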
void forward(std::vector<Mat *> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
{
Mat& output = outputs[0];
switch (op)
{
case SUM:
CV_Assert(coeffs.size() == 0 || coeffs.size() == inputs.size());
if (0 < coeffs.size())
{
output.setTo(0.);
for (size_t i = 0; i < inputs.size(); i++)
{
output += *inputs[i] * coeffs[i];
}
}
else
{
add(*inputs[0], *inputs[1], output);
for (size_t i = 2; i < inputs.size(); i++)
{
output += *inputs[i];
}
}
break;
case PROD:
output.setTo(1.);
for (size_t i = 0; i < inputs.size(); i++)
{
output = output.mul(*inputs[i]);
}
break;
case MAX:
cv::max(*inputs[0], *inputs[1], output);
for (size_t i = 2; i < inputs.size(); i++)
{
cv::max(output, *inputs[i], output);
}
break;
default:
CV_Assert(0);
break;
}
}
virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &input)
{
#ifdef HAVE_HALIDE
Halide::Var x("x"), y("y"), c("c"), n("n");
Halide::Func top = (name.empty() ? Halide::Func() : Halide::Func(name));
Halide::Expr topExpr;
std::vector<Halide::Buffer<> > inputBuffers = halideBuffers(input);
switch (op)
{
case SUM:
if (coeffs.empty())
{
topExpr = inputBuffers[0](x, y, c, n) +
inputBuffers[1](x, y, c, n);
for (int i = 2; i < inputBuffers.size(); ++i)
topExpr += inputBuffers[i](x, y, c, n);
}
else
{
topExpr = coeffs[0] * inputBuffers[0](x, y, c, n) +
coeffs[1] * inputBuffers[1](x, y, c, n);
for (int i = 2; i < inputBuffers.size(); ++i)
topExpr += coeffs[i] * inputBuffers[i](x, y, c, n);
}
break;
case PROD:
topExpr = inputBuffers[0](x, y, c, n) *
inputBuffers[1](x, y, c, n);
for (int i = 2; i < inputBuffers.size(); ++i)
topExpr *= inputBuffers[i](x, y, c, n);
break;
case MAX:
topExpr = max(inputBuffers[0](x, y, c, n),
inputBuffers[1](x, y, c, n));
for (int i = 2; i < inputBuffers.size(); ++i)
topExpr = max(topExpr, inputBuffers[i](x, y, c, n));
break;
default:
return Ptr<BackendNode>();
}
top(x, y, c, n) = topExpr;
return Ptr<BackendNode>(new HalideBackendNode(top));
#endif // HAVE_HALIDE
return Ptr<BackendNode>();
}
virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
const std::vector<MatShape> &outputs) const
{
(void)outputs; // suppress unused variable warning
CV_Assert(inputs.size());
long flops = inputs.size() * total(inputs[0]);
return flops;
}
};
Ptr<EltwiseLayer> EltwiseLayer::create(const LayerParams& params)
{
return Ptr<EltwiseLayer>(new EltwiseLayerImpl(params));
}
}
}

View File

@ -0,0 +1,125 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "../precomp.hpp"
#include "layers_common.hpp"
#include <float.h>
#include <algorithm>
#include <opencv2/dnn/shape_utils.hpp>
namespace cv
{
namespace dnn
{
class FlattenLayerImpl : public FlattenLayer
{
public:
FlattenLayerImpl(const LayerParams &params)
{
_startAxis = params.get<int>("axis", 1);
_endAxis = params.get<int>("end_axis", -1);
setParamsFrom(params);
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
std::vector<MatShape> &internals) const
{
CV_Assert(inputs.size() > 0);
for (size_t i = 1; i < inputs.size(); i++)
{
CV_Assert(inputs[i] == inputs[0]);
}
int numAxes = inputs[0].size();
int startAxis = clamp(_startAxis, numAxes);
int endAxis = clamp(_endAxis, numAxes);
CV_Assert(startAxis >= 0);
CV_Assert(endAxis >= startAxis && endAxis < (int)numAxes);
size_t flattenedDimensionSize = total(inputs[0], startAxis, endAxis + 1);
MatShape outputShapeVec;
for (int i = 0; i < startAxis; i++)
{
outputShapeVec.push_back(inputs[0][i]);
}
outputShapeVec.push_back(flattenedDimensionSize);
for (size_t i = endAxis + 1; i < numAxes; i++)
{
outputShapeVec.push_back(inputs[0][i]);
}
CV_Assert(outputShapeVec.size() <= 4);
outputs.resize(inputs.size(), outputShapeVec);
return true;
}
void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
{
for (size_t i = 0; i < inputs.size(); i++)
{
MatShape outShape = shape(outputs[i]);
outputs[i] = inputs[i]->reshape(1, (int)outShape.size(), &outShape[0]);
}
}
int _startAxis;
int _endAxis;
};
Ptr<FlattenLayer> FlattenLayer::create(const LayerParams& params)
{
return Ptr<FlattenLayer>(new FlattenLayerImpl(params));
}
}
}

View File

@ -0,0 +1,281 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "../precomp.hpp"
#include "layers_common.hpp"
#include "op_halide.hpp"
#include <opencv2/dnn/shape_utils.hpp>
namespace cv
{
namespace dnn
{
class FullyConnectedLayerImpl : public InnerProductLayer
{
public:
enum { VEC_ALIGN = 8 };
FullyConnectedLayerImpl(const LayerParams& params)
{
setParamsFrom(params);
CV_Assert(1 <= blobs.size() && blobs.size() <= 2);
int numOutput = params.get<int>("num_output");
int innerSize = (int)blobs[0].total() / numOutput;
bias = params.get<bool>("bias_term", true);
axis = params.get<int>("axis", 1);
CV_Assert(blobs[0].dims >= 2 && (size_t)(innerSize * numOutput) == blobs[0].total());
CV_Assert(!bias || (blobs.size() == 2 && (size_t)numOutput == blobs[1].total()));
weightsMat = blobs[0] = blobs[0].reshape(1, numOutput);
int vecsize = weightsMat.cols;
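        // Pad each weight row out to a multiple of VEC_ALIGN floats: the SIMD
        // kernels read whole vectors past the logical row end, and the zeroed
        // padding keeps those extra lanes from affecting the dot products.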
if( vecsize % VEC_ALIGN != 0 )
{
int vecsize_aligned = (int)alignSize(vecsize, VEC_ALIGN);
Mat weightsBuf(weightsMat.rows, vecsize_aligned, weightsMat.type());
Mat wpadding = weightsBuf.colRange(vecsize, vecsize_aligned);
wpadding.setTo(Scalar::all(0.));
weightsMat = weightsBuf.colRange(0, vecsize);
blobs[0].copyTo(weightsMat);
blobs[0] = weightsMat;
}
if (bias)
biasMat = blobs[1] = blobs[1].reshape(1, 1);
else
biasMat = Mat::zeros(1, numOutput, weightsMat.type());
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
std::vector<MatShape> &) const
{
CV_Assert(inputs.size() > 0);
CV_Assert(1 <= blobs.size() && blobs.size() <= 2);
CV_Assert(blobs[0].dims == 2);
int cAxis = clamp(axis, inputs[0]);
int outerSize = total(inputs[0], 0, cAxis);
int numOutput = blobs[0].size[0];
outputs.resize(inputs.size(), shape(outerSize, numOutput));
CV_Assert(!bias || (size_t)numOutput == blobs[1].total());
return false;
}
virtual bool supportBackend(int backendId)
{
        return backendId == DNN_BACKEND_DEFAULT ||
               (backendId == DNN_BACKEND_HALIDE && haveHalide() && axis == 1);
}
class FullConnected : public ParallelLoopBody
{
public:
FullConnected(const Mat& srcMat, const Mat& weights, const Mat& biasMat, Mat& dstMat, int nstripes)
{
CV_Assert( srcMat.dims == 2 && srcMat.cols == weights.cols &&
dstMat.rows == srcMat.rows && dstMat.cols == weights.rows &&
srcMat.type() == weights.type() && weights.type() == dstMat.type() &&
srcMat.type() == CV_32F &&
(biasMat.empty() || (biasMat.type() == srcMat.type() &&
biasMat.isContinuous() && (int)biasMat.total() == dstMat.cols)) );
srcMat_ = &srcMat;
weights_ = &weights;
biasMat_ = &biasMat;
dstMat_ = &dstMat;
nstripes_ = nstripes;
useAVX2_ = checkHardwareSupport(CPU_AVX2);
}
void operator()(const Range& r) const
{
int valign = FullyConnectedLayerImpl::VEC_ALIGN;
int nsamples = srcMat_->rows;
int nw0 = weights_->rows;
int k, vecsize = srcMat_->cols;
int vecsize_aligned = (int)alignSize(vecsize, VEC_ALIGN);
int nstripes = nstripes_;
size_t total = (size_t)nsamples*nw0;
size_t stripeSize = (total + nstripes - 1)/nstripes;
size_t stripeStart = r.start*stripeSize;
size_t stripeEnd = r.end == nstripes ? total : std::min(r.end*stripeSize, total);
size_t wstep = weights_->step1();
AutoBuffer<float> srcbuf(vecsize_aligned + valign);
float* sptr = alignPtr((float*)srcbuf, (int)(valign*sizeof(float)));
for( k = vecsize; k < vecsize_aligned; k++ )
sptr[k] = 0.f;
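            // All (sample, output neuron) pairs are flattened into one range of
            // 'total' dot products and split evenly into 'nstripes' stripes;
            // each pass of the loop below handles the chunk of one sample's
            // outputs that falls inside this stripe.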
for( size_t ofs = stripeStart; ofs < stripeEnd; )
{
int sampleIdx = (int)(ofs / nw0);
int delta = (int)(ofs - (size_t)sampleIdx*nw0);
const float* sptr_ = srcMat_->ptr<float>(sampleIdx);
const float* wptr = weights_->ptr<float>(delta);
float* dptr = dstMat_->ptr<float>(sampleIdx) + delta;
const float* biasptr = biasMat_->ptr<float>() + delta;
int nw = std::min(nw0 - delta, (int)(stripeEnd - ofs));
memcpy(sptr, sptr_, vecsize*sizeof(sptr[0]));
#if CV_DNN_TRY_AVX2
if( useAVX2_ )
fastGEMM1T_avx2( sptr, wptr, wstep, biasptr, dptr, nw, vecsize);
else
#endif
{
int i = 0;
#if CV_SIMD128
for( ; i <= nw - 4; i += 4, wptr += 4*wstep )
{
                        v_float32x4 vs0 = v_setall_f32(0.f), vs1 = v_setall_f32(0.f);
                        v_float32x4 vs2 = v_setall_f32(0.f), vs3 = v_setall_f32(0.f);
                        for( k = 0; k < vecsize; k += 4 )
                        {
                            v_float32x4 v = v_load_aligned(sptr + k);
                            vs0 += v*v_load_aligned(wptr + k);
                            vs1 += v*v_load_aligned(wptr + wstep + k);
                            vs2 += v*v_load_aligned(wptr + wstep*2 + k);
                            vs3 += v*v_load_aligned(wptr + wstep*3 + k);
                        }
                        v_float32x4 s = v_reduce_sum4(vs0, vs1, vs2, vs3);
s += v_load(biasptr + i);
v_store(dptr + i, s);
}
#endif
for( ; i < nw; i++, wptr += wstep )
{
float s0=biasptr[i];
for( k = 0; k < vecsize; k++ )
{
float v = sptr[k];
s0 += v*wptr[k];
}
dptr[i] = s0;
}
}
ofs += nw;
}
}
const Mat *srcMat_, *weights_, *biasMat_;
Mat* dstMat_;
int nstripes_;
bool useAVX2_;
};
void forward(std::vector<Mat*> &input, std::vector<Mat> &output, std::vector<Mat> &)
{
int axisCan = clamp(axis, input[0]->dims);
int outerSize = input[0]->total(0, axisCan);
for (size_t i = 0; i < input.size(); i++)
{
Mat srcMat = input[i]->reshape(1, outerSize);
Mat dstMat = output[i].reshape(1, outerSize);
const int nstripes = getNumThreads();
FullConnected fconn(srcMat, weightsMat, biasMat, dstMat, nstripes);
parallel_for_(Range(0, nstripes), fconn, nstripes);
}
}
virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &inputs)
{
#ifdef HAVE_HALIDE
int inW, inH, inC, inN, outC = blobs[0].size[0];
Halide::Buffer<float> inputBuffer = halideBuffer(inputs[0]);
getCanonicalSize(inputBuffer, &inW, &inH, &inC, &inN);
auto weights = wrapToHalideBuffer(blobs[0], {inW, inH, inC, outC});
Halide::Var x("x"), y("y"), c("c"), n("n");
Halide::Func top = (name.empty() ? Halide::Func() : Halide::Func(name));
Halide::RDom r(0, inW, 0, inH, 0, inC);
Halide::Expr topExpr = sum(inputBuffer(r.x, r.y, r.z, n) *
weights(r.x, r.y, r.z, c));
if (bias)
{
Halide::Buffer<float> bias = wrapToHalideBuffer(blobs[1], {outC});
topExpr += bias(c);
}
top(x, y, c, n) = topExpr;
return Ptr<BackendNode>(new HalideBackendNode(top));
#endif // HAVE_HALIDE
return Ptr<BackendNode>();
}
virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
const std::vector<MatShape> &outputs) const
{
(void)inputs; // suppress unused variable warning
long flops = 0;
int innerSize = blobs[0].size[1];
for(int i = 0; i < outputs.size(); i++)
{
flops += 3*innerSize*total(outputs[i]);
}
return flops;
}
bool bias;
Mat weightsMat, biasMat;
};
Ptr<InnerProductLayer> InnerProductLayer::create(const LayerParams& params)
{
return Ptr<InnerProductLayer>(new FullyConnectedLayerImpl(params));
}
}
}

View File

@ -0,0 +1,356 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include "layers_common.hpp"
#include "opencv2/core/hal/intrin.hpp"
#if CV_DNN_TRY_AVX2
#include <immintrin.h>
namespace cv {
namespace dnn {
void fastConv_avx2( const float* weights, size_t wstep, const float* bias,
const float* rowbuf, float* output, const int* outShape,
int blockSize, int vecsize, int vecsize_aligned,
const float* relu, bool initOutput )
{
int outCn = outShape[1];
size_t outPlaneSize = outShape[2]*outShape[3];
float r0 = 1.f, r1 = 1.f, r2 = 1.f;
__m256 vr0 = _mm256_set1_ps(1.f), vr1 = vr0, vr2 = vr0, z = _mm256_setzero_ps();
// now compute dot product of the weights
// and im2row-transformed part of the tensor
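    // Three output channels are computed per iteration; when outCn is not a
    // multiple of 3, the trailing weight and output pointers are aliased to the
    // previous row below, so the surplus lanes harmlessly recompute an
    // already-computed channel instead of running past the buffers.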
for( int i = 0; i < outCn; i += 3 )
{
const float* wptr0 = weights + i*wstep;
const float* wptr1 = wptr0 + wstep;
const float* wptr2 = wptr1 + wstep;
float* outptr0 = output + i*outPlaneSize;
float* outptr1 = outptr0 + outPlaneSize;
float* outptr2 = outptr1 + outPlaneSize;
float bias0 = bias[i], bias1 = bias[i+1], bias2 = bias[i+2];
if( i+2 >= outCn )
{
wptr2 = wptr1;
outptr2 = outptr1;
bias2 = bias1;
if( i+1 >= outCn )
{
wptr2 = wptr1 = wptr0;
outptr2 = outptr1 = outptr0;
bias2 = bias1 = bias0;
}
}
if( relu )
{
r0 = relu[i];
r1 = relu[i+1];
r2 = relu[i+2];
vr0 = _mm256_set1_ps(r0);
vr1 = _mm256_set1_ps(r1);
vr2 = _mm256_set1_ps(r2);
}
int j = 0;
for( ; j <= blockSize - 4; j += 4 )
{
const float* rptr = rowbuf + j*vecsize_aligned;
__m256 vs00 = _mm256_setzero_ps(), vs01 = _mm256_setzero_ps(),
vs02 = _mm256_setzero_ps(), vs03 = _mm256_setzero_ps(),
vs10 = _mm256_setzero_ps(), vs11 = _mm256_setzero_ps(),
vs12 = _mm256_setzero_ps(), vs13 = _mm256_setzero_ps(),
vs20 = _mm256_setzero_ps(), vs21 = _mm256_setzero_ps(),
vs22 = _mm256_setzero_ps(), vs23 = _mm256_setzero_ps();
for( int k = 0; k < vecsize; k += 8, rptr += 8 )
{
__m256 w0 = _mm256_load_ps(wptr0 + k);
__m256 w1 = _mm256_load_ps(wptr1 + k);
__m256 w2 = _mm256_load_ps(wptr2 + k);
__m256 r0 = _mm256_load_ps(rptr);
vs00 = _mm256_fmadd_ps(w0, r0, vs00);
vs10 = _mm256_fmadd_ps(w1, r0, vs10);
vs20 = _mm256_fmadd_ps(w2, r0, vs20);
r0 = _mm256_load_ps(rptr + vecsize_aligned);
vs01 = _mm256_fmadd_ps(w0, r0, vs01);
vs11 = _mm256_fmadd_ps(w1, r0, vs11);
vs21 = _mm256_fmadd_ps(w2, r0, vs21);
r0 = _mm256_load_ps(rptr + vecsize_aligned*2);
vs02 = _mm256_fmadd_ps(w0, r0, vs02);
vs12 = _mm256_fmadd_ps(w1, r0, vs12);
vs22 = _mm256_fmadd_ps(w2, r0, vs22);
r0 = _mm256_load_ps(rptr + vecsize_aligned*3);
vs03 = _mm256_fmadd_ps(w0, r0, vs03);
vs13 = _mm256_fmadd_ps(w1, r0, vs13);
vs23 = _mm256_fmadd_ps(w2, r0, vs23);
}
__m256 t0 = _mm256_hadd_ps(_mm256_hadd_ps(vs00, vs01), _mm256_hadd_ps(vs02, vs03));
__m256 t1 = _mm256_hadd_ps(_mm256_hadd_ps(vs10, vs11), _mm256_hadd_ps(vs12, vs13));
__m256 t2 = _mm256_hadd_ps(_mm256_hadd_ps(vs20, vs21), _mm256_hadd_ps(vs22, vs23));
t0 = _mm256_add_ps(t0, _mm256_permute2f128_ps(t0, t0, 1));
t1 = _mm256_add_ps(t1, _mm256_permute2f128_ps(t1, t1, 1));
t2 = _mm256_add_ps(t2, _mm256_permute2f128_ps(t2, t2, 1));
__m256 s0, s1, s2;
if( initOutput )
{
s0 = _mm256_set1_ps(bias0);
s1 = _mm256_set1_ps(bias1);
s2 = _mm256_set1_ps(bias2);
}
else
{
s0 = _mm256_castps128_ps256(_mm_loadu_ps(outptr0 + j));
s1 = _mm256_castps128_ps256(_mm_loadu_ps(outptr1 + j));
s2 = _mm256_castps128_ps256(_mm_loadu_ps(outptr2 + j));
}
s0 = _mm256_add_ps(s0, t0);
s1 = _mm256_add_ps(s1, t1);
s2 = _mm256_add_ps(s2, t2);
if( relu )
{
__m256 m0 = _mm256_cmp_ps(s0, z, _CMP_GT_OS);
__m256 m1 = _mm256_cmp_ps(s1, z, _CMP_GT_OS);
__m256 m2 = _mm256_cmp_ps(s2, z, _CMP_GT_OS);
s0 = _mm256_xor_ps(s0, _mm256_andnot_ps(m0, _mm256_xor_ps(_mm256_mul_ps(s0, vr0), s0)));
s1 = _mm256_xor_ps(s1, _mm256_andnot_ps(m1, _mm256_xor_ps(_mm256_mul_ps(s1, vr1), s1)));
s2 = _mm256_xor_ps(s2, _mm256_andnot_ps(m2, _mm256_xor_ps(_mm256_mul_ps(s2, vr2), s2)));
}
_mm_storeu_ps(outptr0 + j, _mm256_castps256_ps128(s0));
_mm_storeu_ps(outptr1 + j, _mm256_castps256_ps128(s1));
_mm_storeu_ps(outptr2 + j, _mm256_castps256_ps128(s2));
}
for( ; j < blockSize; j++ )
{
const float* rptr = rowbuf + j*vecsize_aligned;
float s00, s10, s20;
if( initOutput )
{
s00 = bias0;
s10 = bias1;
s20 = bias2;
}
else
{
s00 = outptr0[j];
s10 = outptr1[j];
s20 = outptr2[j];
}
for( int k = 0; k < vecsize; k++ )
{
float r0 = rptr[k];
s00 += wptr0[k]*r0;
s10 += wptr1[k]*r0;
s20 += wptr2[k]*r0;
}
if( relu )
{
s00 = s00 > 0.f ? s00 : s00*r0;
s10 = s10 > 0.f ? s10 : s10*r1;
s20 = s20 > 0.f ? s20 : s20*r2;
}
outptr0[j] = s00;
outptr1[j] = s10;
outptr2[j] = s20;
}
}
_mm256_zeroupper();
}
// dst = vec * weights^t + bias
void fastGEMM1T_avx2( const float* vec, const float* weights,
size_t wstep, const float* bias,
float* dst, int nvecs, int vecsize )
{
int i = 0;
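    // Eight weight rows are processed per iteration, one __m256 accumulator per
    // row; the hadd/permute sequence below collapses each accumulator into one
    // of the eight scalar dot products stored to dst.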
for( ; i <= nvecs - 8; i += 8 )
{
const float* wptr = weights + i*wstep;
__m256 vs0 = _mm256_setzero_ps(), vs1 = _mm256_setzero_ps(),
vs2 = _mm256_setzero_ps(), vs3 = _mm256_setzero_ps(),
vs4 = _mm256_setzero_ps(), vs5 = _mm256_setzero_ps(),
vs6 = _mm256_setzero_ps(), vs7 = _mm256_setzero_ps();
for( int k = 0; k < vecsize; k += 8, wptr += 8 )
{
__m256 v = _mm256_load_ps(vec + k);
vs0 = _mm256_fmadd_ps(_mm256_load_ps(wptr), v, vs0);
vs1 = _mm256_fmadd_ps(_mm256_load_ps(wptr + wstep), v, vs1);
vs2 = _mm256_fmadd_ps(_mm256_load_ps(wptr + wstep*2), v, vs2);
vs3 = _mm256_fmadd_ps(_mm256_load_ps(wptr + wstep*3), v, vs3);
vs4 = _mm256_fmadd_ps(_mm256_load_ps(wptr + wstep*4), v, vs4);
vs5 = _mm256_fmadd_ps(_mm256_load_ps(wptr + wstep*5), v, vs5);
vs6 = _mm256_fmadd_ps(_mm256_load_ps(wptr + wstep*6), v, vs6);
vs7 = _mm256_fmadd_ps(_mm256_load_ps(wptr + wstep*7), v, vs7);
}
__m256 s0 = _mm256_hadd_ps(_mm256_hadd_ps(vs0, vs1), _mm256_hadd_ps(vs2, vs3));
__m256 s1 = _mm256_hadd_ps(_mm256_hadd_ps(vs4, vs5), _mm256_hadd_ps(vs6, vs7));
s0 = _mm256_add_ps(s0, _mm256_permute2f128_ps(s0, s0, 1));
s1 = _mm256_add_ps(s1, _mm256_permute2f128_ps(s1, s1, 1));
s0 = _mm256_add_ps(s0, _mm256_castps128_ps256(_mm_loadu_ps(bias + i)));
s1 = _mm256_add_ps(s1, _mm256_castps128_ps256(_mm_loadu_ps(bias + i + 4)));
_mm_storeu_ps(dst + i, _mm256_castps256_ps128(s0));
_mm_storeu_ps(dst + i + 4, _mm256_castps256_ps128(s1));
}
float temp = 0.f;
for( ; i < nvecs; i++ )
{
const float* wptr = weights + i*wstep;
__m256 vs0 = _mm256_setzero_ps();
for( int k = 0; k < vecsize; k += 8, wptr += 8 )
{
__m256 v = _mm256_load_ps(vec + k);
vs0 = _mm256_fmadd_ps(_mm256_load_ps(wptr), v, vs0);
}
__m256 s0 = _mm256_hadd_ps(_mm256_hadd_ps(vs0, vs0), vs0);
s0 = _mm256_add_ps(s0, _mm256_permute2f128_ps(s0, s0, 1));
_mm_store_ss(&temp, _mm256_castps256_ps128(s0));
dst[i] = temp + bias[i];
}
_mm256_zeroupper();
}
void fastGEMM_avx2( const float* aptr, size_t astep, const float* bptr,
size_t bstep, float* cptr, size_t cstep,
int ma, int na, int nb )
{
int n = 0;
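    // Blocked multiply: a 4-row strip of A against a 16-column strip of B per
    // iteration. Row indices are clamped with std::min so a tail where ma is
    // not a multiple of 4 recomputes the last row rather than reading past A.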
for( ; n <= nb - 16; n += 16 )
{
for( int m = 0; m < ma; m += 4 )
{
const float* aptr0 = aptr + astep*m;
const float* aptr1 = aptr + astep*std::min(m+1, ma-1);
const float* aptr2 = aptr + astep*std::min(m+2, ma-1);
const float* aptr3 = aptr + astep*std::min(m+3, ma-1);
float* cptr0 = cptr + cstep*m;
float* cptr1 = cptr + cstep*std::min(m+1, ma-1);
float* cptr2 = cptr + cstep*std::min(m+2, ma-1);
float* cptr3 = cptr + cstep*std::min(m+3, ma-1);
__m256 d00 = _mm256_setzero_ps(), d01 = _mm256_setzero_ps();
__m256 d10 = _mm256_setzero_ps(), d11 = _mm256_setzero_ps();
__m256 d20 = _mm256_setzero_ps(), d21 = _mm256_setzero_ps();
__m256 d30 = _mm256_setzero_ps(), d31 = _mm256_setzero_ps();
for( int k = 0; k < na; k++ )
{
__m256 a0 = _mm256_set1_ps(aptr0[k]);
__m256 a1 = _mm256_set1_ps(aptr1[k]);
__m256 a2 = _mm256_set1_ps(aptr2[k]);
__m256 a3 = _mm256_set1_ps(aptr3[k]);
__m256 b0 = _mm256_loadu_ps(bptr + k*bstep + n);
__m256 b1 = _mm256_loadu_ps(bptr + k*bstep + n + 8);
d00 = _mm256_fmadd_ps(a0, b0, d00);
d01 = _mm256_fmadd_ps(a0, b1, d01);
d10 = _mm256_fmadd_ps(a1, b0, d10);
d11 = _mm256_fmadd_ps(a1, b1, d11);
d20 = _mm256_fmadd_ps(a2, b0, d20);
d21 = _mm256_fmadd_ps(a2, b1, d21);
d30 = _mm256_fmadd_ps(a3, b0, d30);
d31 = _mm256_fmadd_ps(a3, b1, d31);
}
_mm256_storeu_ps(cptr0 + n, d00);
_mm256_storeu_ps(cptr0 + n + 8, d01);
_mm256_storeu_ps(cptr1 + n, d10);
_mm256_storeu_ps(cptr1 + n + 8, d11);
_mm256_storeu_ps(cptr2 + n, d20);
_mm256_storeu_ps(cptr2 + n + 8, d21);
_mm256_storeu_ps(cptr3 + n, d30);
_mm256_storeu_ps(cptr3 + n + 8, d31);
}
}
_mm256_zeroupper();
for( ; n < nb; n++ )
{
for( int m = 0; m < ma; m++ )
{
const float* aptr0 = aptr + astep*m;
float* cptr0 = cptr + cstep*m;
float d0 = 0.f;
for( int k = 0; k < na; k++ )
d0 += aptr0[k]*bptr[k*bstep + n];
cptr0[n] = d0;
}
}
}
}
}
#endif

View File

@ -0,0 +1,205 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "layers_common.hpp"
namespace cv
{
namespace dnn
{
namespace util
{
std::string makeName(const std::string& str1, const std::string& str2)
{
return str1 + str2;
}
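// Reads a parameter that may be given either as one value for both dimensions
// ("pad") or as an explicit pair ("pad_h"/"pad_w"); the explicit pair takes
// precedence, and defaultValue is used only when hasDefault is set.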
bool getParameter(const LayerParams &params, const std::string& nameBase, const std::string& nameAll,
int &parameterH, int &parameterW, bool hasDefault = false, const int& defaultValue = 0)
{
std::string nameH = makeName(nameBase, std::string("_h"));
std::string nameW = makeName(nameBase, std::string("_w"));
std::string nameAll_ = nameAll;
if(nameAll_ == "")
{
nameAll_ = nameBase;
}
if (params.has(nameH) && params.has(nameW))
{
parameterH = params.get<int>(nameH);
parameterW = params.get<int>(nameW);
return true;
}
else
{
if (params.has(nameAll_))
{
parameterH = parameterW = params.get<int>(nameAll_);
return true;
}
else
{
if(hasDefault)
{
parameterH = parameterW = defaultValue;
return true;
}
else
{
return false;
}
}
}
}
void getKernelSize(const LayerParams &params, int &kernelH, int &kernelW)
{
if(!util::getParameter(params, "kernel", "kernel_size", kernelH, kernelW))
{
CV_Error(cv::Error::StsBadArg, "kernel_size (or kernel_h and kernel_w) not specified");
}
CV_Assert(kernelH > 0 && kernelW > 0);
}
void getStrideAndPadding(const LayerParams &params, int &padH, int &padW, int &strideH, int &strideW, cv::String& padMode)
{
util::getParameter(params, "pad", "pad", padH, padW, true, 0);
util::getParameter(params, "stride", "stride", strideH, strideW, true, 1);
padMode = "";
if (params.has("pad_mode"))
{
padMode = params.get<String>("pad_mode");
}
CV_Assert(padH >= 0 && padW >= 0 && strideH > 0 && strideW > 0);
}
}
void getPoolingKernelParams(const LayerParams &params, int &kernelH, int &kernelW, bool &globalPooling,
int &padH, int &padW, int &strideH, int &strideW, cv::String &padMode)
{
util::getStrideAndPadding(params, padH, padW, strideH, strideW, padMode);
globalPooling = params.has("global_pooling");
if (globalPooling)
{
if(params.has("kernel_h") || params.has("kernel_w") || params.has("kernel_size"))
{
CV_Error(cv::Error::StsBadArg, "In global_pooling mode, kernel_size (or kernel_h and kernel_w) cannot be specified");
}
if(padH != 0 || padW != 0 || strideH != 1 || strideW != 1)
{
CV_Error(cv::Error::StsBadArg, "In global_pooling mode, pad_h and pad_w must be = 0, and stride_h and stride_w must be = 1");
}
}
else
{
util::getKernelSize(params, kernelH, kernelW);
}
}
void getConvolutionKernelParams(const LayerParams &params, int &kernelH, int &kernelW, int &padH, int &padW,
int &strideH, int &strideW, int &dilationH, int &dilationW, cv::String &padMode)
{
util::getKernelSize(params, kernelH, kernelW);
util::getStrideAndPadding(params, padH, padW, strideH, strideW, padMode);
util::getParameter(params, "dilation", "dilation", dilationH, dilationW, true, 1);
CV_Assert(dilationH > 0 && dilationW > 0);
}
// From TensorFlow code:
// Total padding on rows and cols is
// Pr = (R' - 1) * S + Kr - R
// Pc = (C' - 1) * S + Kc - C
// where (R', C') are output dimensions, (R, C) are input dimensions, S
// is stride, (Kr, Kc) are filter dimensions.
// We pad Pr/2 on the left and Pr - Pr/2 on the right, Pc/2 on the top
// and Pc - Pc/2 on the bottom. When Pr or Pc is odd, this means
// we pad more on the right and bottom than on the top and left.
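// Worked example (SAME mode): for R = 5, Kr = 3, S = 2 the output is
// R' = ceil(5/2) = 3, so Pr = (3 - 1)*2 + 3 - 5 = 2, split as 1 + 1
// between the two sides of the dimension.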
void getConvPoolOutParams(const Size& inp, const Size &kernel,
const Size &stride, const String &padMode,
Size& out)
{
if (padMode == "VALID")
{
        out.height = (inp.height - kernel.height + stride.height) / stride.height;
        out.width = (inp.width - kernel.width + stride.width) / stride.width;
}
else if (padMode == "SAME")
{
out.height = (inp.height - 1 + stride.height) / stride.height;
out.width = (inp.width - 1 + stride.width) / stride.width;
}
else
{
CV_Error(Error::StsError, "Unsupported padding mode");
}
}
void getConvPoolPaddings(const Size& inp, const Size& out,
const Size &kernel, const Size &stride,
const String &padMode, Size &pad)
{
if (padMode == "VALID")
{
pad = cv::Size(0,0);
}
else if (padMode == "SAME")
{
int Ph = std::max(0, (out.height - 1) * stride.height + kernel.height - inp.height);
int Pw = std::max(0, (out.width - 1) * stride.width + kernel.width - inp.width);
// For odd values of total padding, add more padding at the 'right'
// side of the given dimension.
pad = cv::Size(Pw / 2, Ph / 2);
}
}
}
}

View File

@ -0,0 +1,87 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_DNN_LAYERS_LAYERS_COMMON_HPP__
#define __OPENCV_DNN_LAYERS_LAYERS_COMMON_HPP__
#include <opencv2/dnn.hpp>
#include <opencv2/dnn/shape_utils.hpp>
namespace cv
{
namespace dnn
{
void getConvolutionKernelParams(const LayerParams &params, int &kernelH, int &kernelW, int &padH, int &padW,
int &strideH, int &strideW, int &dilationH, int &dilationW, cv::String& padMode);
void getPoolingKernelParams(const LayerParams &params, int &kernelH, int &kernelW, bool &globalPooling,
int &padH, int &padW, int &strideH, int &strideW, cv::String& padMode);
void getConvPoolOutParams(const Size& inp, const Size &kernel,
const Size &stride, const String &padMode,
Size& out);
void getConvPoolPaddings(const Size& inp, const Size& out,
const Size &kernel, const Size &stride,
const String &padMode, Size &pad);
#if CV_SSE2
#define CV_DNN_TRY_AVX2 1
void fastConv_avx2(const float* weights, size_t wstep, const float* bias,
const float* rowbuf, float* output, const int* outShape,
int blockSize, int vecsize, int vecsize_aligned,
const float* relu, bool initOutput);
void fastGEMM1T_avx2( const float* vec, const float* weights,
size_t wstep, const float* bias,
float* dst, int nvecs, int vecsize );
void fastGEMM_avx2( const float* aptr, size_t astep, const float* bptr0,
size_t bstep, float* cptr, size_t cstep,
int ma, int na, int nb );
#else
#define CV_DNN_TRY_AVX2 0
#endif
}
}
#endif

View File

@ -0,0 +1,341 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "../precomp.hpp"
#include "layers_common.hpp"
#include "op_halide.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/dnn/shape_utils.hpp"
#include "opencv2/core/hal/hal.hpp"
#include <algorithm>
namespace cv
{
namespace dnn
{
class LRNLayerImpl : public LRNLayer
{
public:
LRNLayerImpl(const LayerParams& params)
{
setParamsFrom(params);
type = -1;
String nrmType = params.get<String>("norm_region", "ACROSS_CHANNELS");
if (nrmType == "ACROSS_CHANNELS")
type = LRNLayer::CHANNEL_NRM;
else if (nrmType == "WITHIN_CHANNEL")
type = LRNLayer::SPATIAL_NRM;
else
CV_Error(Error::StsBadArg, "Unknown region type \"" + nrmType + "\"");
size = params.get<int>("local_size", 5);
if (size % 2 != 1 || size <= 0)
CV_Error(Error::StsBadArg, "LRN layer supports only positive odd values for local_size");
alpha = params.get<double>("alpha", 1);
beta = params.get<double>("beta", 0.75);
bias = params.get<double>("bias", 1);
normBySize = params.get<bool>("norm_by_size", true);
}
virtual bool supportBackend(int backendId)
{
        return backendId == DNN_BACKEND_DEFAULT ||
               (backendId == DNN_BACKEND_HALIDE && haveHalide());
}
void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
{
CV_Assert(inputs.size() == outputs.size());
for (int i = 0; i < inputs.size(); i++)
{
CV_Assert(inputs[i]->dims == 4);
Mat &src = *inputs[i];
Mat &dst = outputs[i];
switch (type)
{
case CHANNEL_NRM:
channelNormalization(src, dst);
break;
case SPATIAL_NRM:
spatialNormalization(src, dst);
break;
default:
CV_Error(Error::StsNotImplemented, "Unimplemented mode of LRN layer");
break;
}
}
}
class ChannelLRN : public ParallelLoopBody
{
public:
ChannelLRN(const float* src, float* dst, int channels, int ksize,
float alpha1, float bias1, float beta1,
size_t planeSize, int nsamples, int nstripes)
{
src_ = src; dst_ = dst;
channels_ = channels;
ksize_ = ksize;
alpha1_ = alpha1; bias1_ = bias1; beta1_ = beta1;
planeSize_ = planeSize; nsamples_ = nsamples; nstripes_ = nstripes;
}
void operator()(const Range& r) const
{
int nsamples = nsamples_, nstripes = nstripes_;
size_t planeSize = planeSize_, planeSize_n = planeSize * nsamples;
size_t elemsPerStripe = (planeSize_n + nstripes - 1)/nstripes;
size_t rstart = r.start*elemsPerStripe;
size_t rend = r.end == nstripes ? planeSize_n : r.end*elemsPerStripe;
rstart = std::min(rstart, planeSize_n);
rend = std::min(rend, planeSize_n);
float alpha1 = alpha1_, bias1 = bias1_, beta1 = beta1_;
int k, channels = channels_, ksize = ksize_;
AutoBuffer<float> buf_((channels + ksize*2 + 4)*2);
float* acc = (float*)buf_;
float* buf = acc + channels + ksize + 1;
for( k = 0; k <= ksize; k++ )
buf[-k-1] = buf[channels + k] = 0.f;
for( size_t ofs = rstart; ofs < rend; )
{
int sampleIdx = (int)(ofs/planeSize);
if( sampleIdx >= nsamples )
break;
size_t ofs0 = ofs - sampleIdx*planeSize;
size_t ofs1 = std::min(planeSize - ofs0, rend - ofs) + ofs;
const float* src = src_ + sampleIdx*planeSize*channels + ofs0;
float* dst = dst_ + sampleIdx*planeSize*channels + ofs0;
for( ; ofs < ofs1; ofs++, src++, dst++ )
{
for( k = 0; k < channels; k++ )
buf[k] = src[k*planeSize];
float s = 0;
for( k = 0; k < ksize; k++ )
s += buf[k]*buf[k];
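                    // Roll the window of squared values one channel at a time:
                    // s gains x1^2 and loses x0^2, computed as (x1 + x0)*(x1 - x0),
                    // so each channel costs O(1) work instead of O(ksize).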
for( k = 0; k < channels; k++ )
{
float x1 = buf[k + ksize];
float x0 = buf[k - ksize - 1];
s = std::max(s + (x1 + x0)*(x1 - x0), 0.f);
acc[k] = (float)(alpha1*s + bias1);
}
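                    // acc[k] becomes (bias1 + alpha1*s)^beta1 via exp(beta1*log(...));
                    // with beta1 = -beta this is the LRN denominator, and the final
                    // store below computes dst = src * acc.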
hal::log32f(acc, acc, channels);
for( k = 0; k < channels; k++ )
acc[k] *= beta1;
hal::exp32f(acc, acc, channels);
for( k = 0; k < channels; k++ )
dst[k*planeSize] = buf[k]*acc[k];
}
}
}
const float* src_;
float* dst_;
float alpha1_, bias1_, beta1_;
size_t planeSize_;
int channels_, ksize_, nsamples_, nstripes_;
};
void channelNormalization(Mat &srcBlob, Mat &dstBlob)
{
int num = srcBlob.size[0];
int channels = srcBlob.size[1];
int ksize = (size - 1) / 2;
int sizeNormFactor = normBySize ? size : 1;
size_t planeSize = srcBlob.size[2]*srcBlob.size[3];
int nstripes = std::max(getNumThreads(), 1);
ChannelLRN clrn(srcBlob.ptr<float>(), dstBlob.ptr<float>(), channels,
ksize, alpha/sizeNormFactor, bias, -beta, planeSize, num, nstripes);
parallel_for_(Range(0, nstripes), clrn, nstripes);
}
void sqrBoxFilter_(const Mat &src, Mat &dst)
{
Mat srcRawWrapper(src.rows, src.cols, src.type(), src.data, src.step[0]);
cv::sqrBoxFilter(srcRawWrapper, dst, dst.depth(), Size(size, size), Point(-1, -1), false, BORDER_CONSTANT);
}
void spatialNormalization(Mat &srcBlob, Mat &dstBlob)
{
int num = srcBlob.size[0];
int channels = srcBlob.size[1];
int sizeNormFactor = normBySize ? size*size : 1;
Mat srcMat = srcBlob;
Mat dstMat = dstBlob;
for (int n = 0; n < num; n++)
{
for (int cn = 0; cn < channels; cn++)
{
Mat src = getPlane(srcMat, n, cn);
Mat dst = getPlane(dstMat, n, cn);
sqrBoxFilter_(src, dst);
dst.convertTo(dst, dst.type(), alpha/sizeNormFactor, bias);
cv::pow(dst, beta, dst);
cv::divide(src, dst, dst);
}
}
}
virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &inputs)
{
#ifdef HAVE_HALIDE
float alphaSize = alpha;
if (normBySize)
alphaSize /= (type == CHANNEL_NRM ? size : size * size);
int width, height, channels, numImgs;
Halide::Buffer<float> inputBuffer = halideBuffer(inputs[0]);
getCanonicalSize(inputBuffer, &width, &height, &channels, &numImgs);
Halide::Var x("x"), y("y"), c("c"), n("n");
Halide::Func top = (name.empty() ? Halide::Func() : Halide::Func(name));
Halide::Func padded_sq(name + "_padded_sq");
Halide::Func sq("sq");
sq(x, y, c, n) = inputBuffer(x, y, c, n) * inputBuffer(x, y, c, n);
Halide::Func bounded =
Halide::BoundaryConditions::constant_exterior(sq, 0, 0, width,
0, height,
0, channels,
0, numImgs);
padded_sq(x, y, c, n) = bounded(x, y, c, n);
Halide::Expr base;
if (type == CHANNEL_NRM)
{
Halide::RDom r((1 - size) / 2, size);
base = alphaSize * sum(padded_sq(x, y, c + r, n));
}
else // SPATIAL_NRM
{
Halide::RDom r((1 - size) / 2, size, (1 - size) / 2, size);
base = alphaSize * sum(padded_sq(x + r.x, y + r.y, c, n));
}
base += static_cast<float>(bias);
top(x, y, c, n) = inputBuffer(x, y, c, n) / pow(base, beta);
return Ptr<BackendNode>(new HalideBackendNode({ padded_sq, top }));
#endif // HAVE_HALIDE
return Ptr<BackendNode>();
}
virtual void applyHalideScheduler(Ptr<BackendNode>& node,
const std::vector<Mat*> &inputs,
const std::vector<Mat> &outputs,
int targetId) const
{
#ifdef HAVE_HALIDE
if (targetId != DNN_TARGET_CPU)
{
Layer::applyHalideScheduler(node, inputs, outputs, targetId);
return;
}
int outW, outH, outC, outN;
getCanonicalSize(outputs[0].size, &outW, &outH, &outC, &outN);
Halide::Var x("x"), y("y"), c("c"), n("n"), yo("yo"), yi("yi"), tile("tile");
Halide::Func& top = node.dynamicCast<HalideBackendNode>()->funcs[1];
Halide::Func& padded_sq = node.dynamicCast<HalideBackendNode>()->funcs[0];
if (outW < 8 || outH <= 2)
return;
top.reorder(x, c, y, n)
.split(y, yo, yi, 2)
.fuse(yo, n, tile)
.parallel(tile)
.unroll(yi)
.vectorize(x, 8);
padded_sq.store_at(top, tile)
.compute_at(top, yi);
#endif // HAVE_HALIDE
}
virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
const std::vector<MatShape> &outputs) const
{
(void)outputs; // suppress unused variable warning
CV_Assert(inputs.size() > 0);
long flops = 0;
for(int i = 0; i < inputs.size(); i++)
{
if (type == CHANNEL_NRM)
{
int channels = inputs[i][1];
int ksize = (size - 1) / 2;
flops += inputs[i][0]*(std::min(ksize, channels)*2*total(inputs[i], 2) + channels*4*total(inputs[i], 2));
if (ksize < channels)
{
flops += (size + 2*(channels - size))*total(inputs[i], 2);
}
}
else
{
flops += total(inputs[i])*(2*size*size + 2);
}
}
return flops;
}
};
Ptr<LRNLayer> LRNLayer::create(const LayerParams& params)
{
return Ptr<LRNLayer>(new LRNLayerImpl(params));
}
}
}

View File

@ -0,0 +1,128 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
// Copyright (C) 2016, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
/*
Implementation of Batch Normalization layer.
*/
#include "../precomp.hpp"
#include "layers_common.hpp"
#include "op_halide.hpp"
#include <opencv2/dnn/shape_utils.hpp>
namespace cv
{
namespace dnn
{
class MaxUnpoolLayerImpl : public MaxUnpoolLayer
{
public:
MaxUnpoolLayerImpl(const LayerParams& params)
{
setParamsFrom(params);
poolKernel = Size(params.get<int>("pool_k_w"), params.get<int>("pool_k_h"));
poolPad = Size(params.get<int>("pool_pad_w"), params.get<int>("pool_pad_h"));
poolStride = Size(params.get<int>("pool_stride_w"), params.get<int>("pool_stride_h"));
}
virtual bool supportBackend(int backendId)
{
        return backendId == DNN_BACKEND_DEFAULT ||
               (backendId == DNN_BACKEND_HALIDE && haveHalide() &&
                !poolPad.width && !poolPad.height);
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
std::vector<MatShape> &internals) const
{
CV_Assert(inputs.size() == 2);
CV_Assert(total(inputs[0]) == total(inputs[1]));
MatShape outShape = inputs[0];
outShape[2] = (outShape[2] - 1) * poolStride.height + poolKernel.height - 2 * poolPad.height;
outShape[3] = (outShape[3] - 1) * poolStride.width + poolKernel.width - 2 * poolPad.width;
outputs.clear();
outputs.push_back(outShape);
return false;
}
void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
{
CV_Assert(inputs.size() == 2);
Mat& input = *inputs[0];
Mat& indices = *inputs[1];
CV_Assert(input.total() == indices.total());
CV_Assert(input.size[0] == 1);
CV_Assert(input.isContinuous());
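        // Scatter pass: each input value is written to the output position
        // recorded in the matching pooling-indices blob; all other output
        // elements stay zero.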
for(int i_n = 0; i_n < outputs.size(); i_n++)
{
Mat& outBlob = outputs[i_n];
outBlob.setTo(0);
CV_Assert(input.size[1] == outBlob.size[1]);
int outPlaneTotal = outBlob.size[2]*outBlob.size[3];
for (int i_c = 0; i_c < input.size[1]; i_c++)
{
Mat outPlane = getPlane(outBlob, 0, i_c);
int wh_area = input.size[2]*input.size[3];
const float* inptr = input.ptr<float>(0, i_c);
const float* idxptr = indices.ptr<float>(0, i_c);
float* outptr = outPlane.ptr<float>();
for(int i_wh = 0; i_wh < wh_area; i_wh++)
{
int index = idxptr[i_wh];
CV_Assert(0 <= index && index < outPlaneTotal);
outptr[index] = inptr[i_wh];
}
}
}
}
virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &input)
{
#ifdef HAVE_HALIDE
        // Unpooling is well-defined only when kernel == stride: if kernel > stride
        // the result is not deterministic, and if kernel < stride we just skip
        // a part of input data (you'd better change your model).
        if (poolKernel.width != poolStride.width ||
            poolKernel.height != poolStride.height)
            CV_Error(cv::Error::StsNotImplemented,
                     "Halide backend for maximum unpooling "
                     "does not support the case kernel != stride");
Halide::Var x("x"), y("y"), c("c"), n("n");
Halide::Func top = (name.empty() ? Halide::Func() : Halide::Func(name));
Halide::Buffer<float> inputBuffer = halideBuffer(input[0]);
Halide::Buffer<float> indices = halideBuffer(input[1]);
Halide::Expr pooledX = x / poolKernel.width;
Halide::Expr pooledY = y / poolKernel.height;
const int outW = inputBuffer.width() * poolKernel.width;
top(x, y, c, n) = select(y * outW + x == indices(pooledX, pooledY, c, n),
inputBuffer(pooledX, pooledY, c, n), 0.0f);
return Ptr<BackendNode>(new HalideBackendNode(top));
#endif // HAVE_HALIDE
return Ptr<BackendNode>();
}
};
Ptr<MaxUnpoolLayer> MaxUnpoolLayer::create(const LayerParams& params)
{
return Ptr<MaxUnpoolLayer>(new MaxUnpoolLayerImpl(params));
}
}
}

View File

@ -0,0 +1,108 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "../precomp.hpp"
#include "layers_common.hpp"
#include <opencv2/dnn/shape_utils.hpp>
namespace cv
{
namespace dnn
{
class MVNLayerImpl : public MVNLayer
{
public:
MVNLayerImpl(const LayerParams& params)
{
setParamsFrom(params);
normVariance = params.get<bool>("normalize_variance", true);
acrossChannels = params.get<bool>("across_channels", false);
eps = params.get<double>("eps", 1e-9);
}
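    // Mean-variance normalization: each row of the reshaped blob (one channel
    // plane, or one whole sample when across_channels is set) is shifted to
    // zero mean and, if normalize_variance is set, scaled by 1/(eps + stddev).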
void forward(std::vector<Mat *> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
{
for (size_t inpIdx = 0; inpIdx < inputs.size(); inpIdx++)
{
Mat &inpBlob = *inputs[inpIdx];
Mat &outBlob = outputs[inpIdx];
int splitDim = (acrossChannels) ? 1 : 2;
int i, newRows = 1;
for( i = 0; i < splitDim; i++ )
newRows *= inpBlob.size[i];
Mat inpMat = inpBlob.reshape(1, newRows);
Mat outMat = outBlob.reshape(1, newRows);
Scalar mean, dev;
for ( i = 0; i < newRows; i++)
{
Mat inpRow = inpMat.row(i);
Mat outRow = outMat.row(i);
cv::meanStdDev(inpRow, mean, (normVariance) ? dev : noArray());
double alpha = (normVariance) ? 1/(eps + dev[0]) : 1;
inpRow.convertTo(outRow, outRow.type(), alpha, -mean[0] * alpha);
}
}
}
virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
const std::vector<MatShape> &outputs) const
{
(void)outputs; // suppress unused variable warning
long flops = 0;
for(int i = 0; i < inputs.size(); i++)
{
flops += 6*total(inputs[i]) + 3*total(inputs[i], 0, normVariance ? 2 : 1);
}
return flops;
}
};
Ptr<MVNLayer> MVNLayer::create(const LayerParams& params)
{
return Ptr<MVNLayer>(new MVNLayerImpl(params));
}
}
}

View File

@ -0,0 +1,223 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "../precomp.hpp"
#include "layers_common.hpp"
#include <float.h>
#include <algorithm>
namespace cv
{
namespace dnn
{
namespace
{
const std::string layerName = "NormalizeBBox";
}
class NormalizeBBoxLayerImpl : public NormalizeBBoxLayer
{
float _eps;
bool _across_spatial;
bool _channel_shared;
public:
bool getParameterDict(const LayerParams &params,
const std::string &parameterName,
DictValue& result)
{
if (!params.has(parameterName))
{
return false;
}
result = params.get(parameterName);
return true;
}
template<typename T>
T getParameter(const LayerParams &params,
const std::string &parameterName,
const size_t &idx=0,
const bool required=true,
const T& defaultValue=T())
{
DictValue dictValue;
bool success = getParameterDict(params, parameterName, dictValue);
if(!success)
{
if(required)
{
std::string message = layerName;
message += " layer parameter does not contain ";
message += parameterName;
message += " parameter.";
CV_Error(Error::StsBadArg, message);
}
else
{
return defaultValue;
}
}
return dictValue.get<T>(idx);
}
NormalizeBBoxLayerImpl(const LayerParams &params)
{
_eps = getParameter<float>(params, "eps", 0, false, 1e-10f);
_across_spatial = getParameter<bool>(params, "across_spatial");
_channel_shared = getParameter<bool>(params, "channel_shared");
setParamsFrom(params);
}
void checkInputs(const std::vector<Mat*> &inputs)
{
CV_Assert(inputs.size() > 0);
CV_Assert(inputs[0]->dims == 4 && inputs[0]->type() == CV_32F);
for (size_t i = 1; i < inputs.size(); i++)
{
CV_Assert(inputs[i]->dims == 4 && inputs[i]->type() == CV_32F);
CV_Assert(inputs[i]->size == inputs[0]->size);
}
CV_Assert(inputs[0]->dims > 2);
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
std::vector<MatShape> &internals) const
{
bool inplace = Layer::getMemoryShapes(inputs, requiredOutputs, outputs, internals);
size_t channels = inputs[0][1];
size_t rows = inputs[0][2];
size_t cols = inputs[0][3];
size_t channelSize = rows * cols;
internals.assign(1, shape(channels, channelSize));
internals.push_back(shape(channels, 1));
internals.push_back(shape(1, channelSize));
return inplace;
}
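// What forward() computes, in outline (illustrative):
//   buffer = src .^ 2
//   if _across_spatial: dst = src / sqrt(sum(buffer) + _eps)   (one norm per image)
//   else:               dst = src / sqrt(channel-wise sums)    (one norm per spatial
//                                                               position, reduced via gemm)
// dst is then multiplied by the learned scale: a single scalar when
// _channel_shared is set, otherwise one factor per channel.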
void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
{
checkInputs(inputs);
Mat& buffer = internals[0], sumChannelMultiplier = internals[1],
sumSpatialMultiplier = internals[2];
sumChannelMultiplier.setTo(1.0);
sumSpatialMultiplier.setTo(1.0);
const Mat& inp0 = *inputs[0];
size_t num = inp0.size[0];
size_t channels = inp0.size[1];
size_t channelSize = inp0.size[2] * inp0.size[3];
Mat zeroBuffer(channels, channelSize, CV_32F, Scalar(0));
Mat absDiff;
Mat scale = blobs[0];
for (size_t j = 0; j < inputs.size(); j++)
{
for (size_t n = 0; n < num; ++n)
{
Mat src = Mat(channels, channelSize, CV_32F, inputs[j]->ptr<float>(n));
Mat dst = Mat(channels, channelSize, CV_32F, outputs[j].ptr<float>(n));
buffer = src.mul(src);
if (_across_spatial)
{
absdiff(buffer, zeroBuffer, absDiff);
// add eps to avoid division by zero
double absSum = sum(absDiff)[0] + _eps;
float norm = sqrt(absSum);
dst = src / norm;
}
else
{
Mat norm(channelSize, 1, buffer.type()); // channelSize x 1
// (_channels x channelSize)T * _channels x 1 -> channelSize x 1
gemm(buffer, sumChannelMultiplier, 1, norm, 0, norm, GEMM_1_T);
// compute norm
pow(norm, 0.5f, norm);
// scale the layer
// _channels x 1 * (channelSize x 1)T -> _channels x channelSize
gemm(sumChannelMultiplier, norm, 1, buffer, 0, buffer, GEMM_2_T);
dst = src / buffer;
}
// scale the output
if (_channel_shared)
{
// _scale: 1 x 1
dst *= scale.at<float>(0, 0);
}
else
{
// _scale: _channels x 1
// _channels x 1 * 1 x channelSize -> _channels x channelSize
gemm(scale, sumSpatialMultiplier, 1, buffer, 0, buffer);
dst = dst.mul(buffer);
}
}
}
}
};
Ptr<NormalizeBBoxLayer> NormalizeBBoxLayer::create(const LayerParams &params)
{
return Ptr<NormalizeBBoxLayer>(new NormalizeBBoxLayerImpl(params));
}
}
}

View File

@ -0,0 +1,131 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
// Copyright (C) 2016, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
/*
Implementation of padding layer, which adds paddings to input blob.
*/
#include "../precomp.hpp"
#include "op_halide.hpp"
#include <vector>
namespace cv
{
namespace dnn
{
class PaddingLayerImpl : public PaddingLayer
{
public:
PaddingLayerImpl(const LayerParams &params)
{
setParamsFrom(params);
paddingDim = params.get<int>("padding_dim");
padding = abs(params.get<int>("padding"));
inputDims = params.get<int>("input_dims", 0);
index = params.get<int>("index", 0);
paddingValue = params.get<double>("value", 0);
if(paddingDim < 0 || padding < 0)
CV_Error(cv::Error::StsNotImplemented, "Negative padding or padding dimension isn't supported");
}
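// Shape effect, by example (illustrative): with padding_dim = 2 and
// padding = 2, an input of shape 1x2x3x4 produces an output of shape
// 1x2x5x4 -- two extra rows filled with `value`, inserted at position
// `index` (or appended at the end when index == 0).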
bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
std::vector<MatShape> &internals) const
{
outputs.clear();
for(int i = 0; i < inputs.size(); i++)
{
MatShape shape = inputs[i];
int dim = getPadDim(shape);
CV_Assert(dim < shape.size());
shape[dim] += padding;
outputs.push_back(shape);
}
return false;
}
virtual bool supportBackend(int backendId)
{
return backendId == DNN_BACKEND_DEFAULT ||
       (backendId == DNN_BACKEND_HALIDE && haveHalide());
}
void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
{
for(int i = 0; i < inputs.size(); i++)
{
outputs[i] = paddingValue;
const Mat& inp = *inputs[i];
Mat& out = outputs[i];
int dims = inp.dims;
MatShape inShape(inp.size.p, inp.size.p + dims);
MatShape outShape(out.size.p, out.size.p + dims);
int dim = getPadDim(inShape);
int actualIndex = index;
if(index == 0)
actualIndex = inShape[dim];
std::vector<std::pair<Range, Range> > srcDstRanges;
srcDstRanges.push_back(std::make_pair(Range(0, actualIndex), Range(0, actualIndex)));
srcDstRanges.push_back(std::make_pair(Range(actualIndex, inShape[dim]),
Range(actualIndex + padding, outShape[dim])));
std::vector<Range> srcRanges(dims, Range::all()), dstRanges = srcRanges;
for(int j = 0; j < srcDstRanges.size(); j++)
{
if(!srcDstRanges[j].first.empty())
{
srcRanges[dim] = srcDstRanges[j].first;
dstRanges[dim] = srcDstRanges[j].second;
Mat dst = out(&dstRanges[0]);
Mat src = inp(&srcRanges[0]).clone();
src.copyTo(dst);
}
}
}
}
int getPadDim(const MatShape& shape) const
{
return (inputDims > 0 && (int)shape.size() > inputDims) ? paddingDim + 1 : paddingDim;
}
virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &inputs)
{
#ifdef HAVE_HALIDE
int inW, inH, inC, inN;
Halide::Buffer<float> inputBuffer = halideBuffer(inputs[0]);
getCanonicalSize(inputBuffer, &inW, &inH, &inC, &inN);
Halide::Var x("x"), y("y"), c("c"), n("n");
Halide::Func top = (name.empty() ? Halide::Func() : Halide::Func(name));
Halide::Func padded =
Halide::BoundaryConditions::constant_exterior(inputBuffer, paddingValue);
top(x, y, c, n) = padded(x, y, c, n);
return Ptr<BackendNode>(new HalideBackendNode(top));
#endif // HAVE_HALIDE
return Ptr<BackendNode>();
}
int paddingDim, padding, inputDims, index;
float paddingValue;
};
Ptr<PaddingLayer> PaddingLayer::create(const LayerParams &params)
{
return Ptr<PaddingLayer>(new PaddingLayerImpl(params));
}
}
}

View File

@ -0,0 +1,243 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "../precomp.hpp"
#include "layers_common.hpp"
#include <float.h>
#include <algorithm>
namespace cv
{
namespace dnn
{
class PermuteLayerImpl : public PermuteLayer
{
public:
void checkCurrentOrder(int currentOrder)
{
if(currentOrder < 0 || currentOrder > 3)
{
CV_Error(
Error::StsBadArg,
"Orders of dimensions in Permute layer parameter"
"must be in [0...3] interval");
}
if(std::find(_order.begin(), _order.end(), currentOrder) != _order.end())
{
CV_Error(Error::StsBadArg,
"Permute layer parameter contains duplicated orders.");
}
}
void checkNeedForPermutation()
{
_needsPermute = false;
for (size_t i = 0; i < _numAxes; ++i)
{
if (_order[i] != i)
{
_needsPermute = true;
break;
}
}
}
PermuteLayerImpl(const LayerParams &params)
{
if (!params.has("order"))
{
_needsPermute = false;
return;
}
DictValue paramOrder = params.get("order");
if(paramOrder.size() > 4)
{
CV_Error(
Error::StsBadArg,
"Too many (> 4) orders of dimensions in Permute layer");
}
_numAxes = paramOrder.size();
for (size_t i = 0; i < _numAxes; i++)
{
int currentOrder = paramOrder.get<int>(i);
checkCurrentOrder(currentOrder);
_order.push_back(currentOrder);
}
setParamsFrom(params);
checkNeedForPermutation();
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
std::vector<MatShape> &internals) const
{
if(!_needsPermute)
return true;
CV_Assert(inputs.size() > 0);
CV_Assert((int)_numAxes == inputs[0].size());
MatShape shapeBefore = inputs[0], shapeAfter;
for (size_t i = 0; i < _numAxes; i++)
{
shapeAfter.push_back(shapeBefore[_order[i]]);
}
outputs.clear();
for (size_t i = 0; i < inputs.size(); i++)
{
CV_Assert(inputs[i][2] == shapeBefore[2] && inputs[i][3] == shapeBefore[3]);
CV_Assert(total(inputs[i]) == total(shapeAfter));
outputs.push_back(shapeAfter);
}
return false;
}
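// Stride bookkeeping, by example (illustrative): permuting NCHW with
// order = {0, 2, 3, 1} (i.e. to NHWC) on a 2x3x4x5 input gives
//   _oldStride = {60, 20, 5, 1}   (strides of the source shape)
//   _newStride = {60, 15, 3, 1}   (strides of the permuted 2x4x5x3 shape)
// forward() then maps every flat output index back to a flat input index
// by decomposing it with _newStride and recomposing with _oldStride[order[j]].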
void computeStrides(const MatShape &shapeBefore, const MatShape &shapeAfter)
{
_oldStride.resize(_numAxes);
_newStride.resize(_numAxes);
_oldStride[_numAxes - 1] = 1;
_newStride[_numAxes - 1] = 1;
for(int i = _numAxes - 2; i >= 0; i--)
{
_oldStride[i] = _oldStride[i + 1] * shapeBefore[i + 1];
_newStride[i] = _newStride[i + 1] * shapeAfter[i + 1];
}
_count = _oldStride[0] * shapeBefore[0];
}
void finalize(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs)
{
if(!_needsPermute)
{
return;
}
CV_Assert(inputs.size() > 0);
const Mat& inp0 = *inputs[0];
CV_Assert((int)_numAxes == inp0.dims);
computeStrides(shape(*inputs[0]), shape(outputs[0]));
}
void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
{
size_t k, ninputs = inputs.size();
if(!_needsPermute)
{
for (k = 0; k < ninputs; k++)
outputs[k] = *inputs[k];
}
else
{
size_t i, j, count = _count, numAxes = _numAxes;
const size_t* newStride = &_newStride[0];
const size_t* oldStride = &_oldStride[0];
const size_t* order = &_order[0];
for (k = 0; k < ninputs; k++)
{
const Mat& inp = *inputs[k];
Mat& out = outputs[k];
CV_Assert(inp.dims == numAxes && inp.size == inputs[0]->size);
CV_Assert(out.dims == numAxes && out.size == outputs[0].size);
// for( i = 0; i < numAxes; i++ )
// {
// CV_Assert(inp.size[i] == _oldDimensionSize[i]);
// CV_Assert(out.size[i] == _newDimensionSize[i]);
// }
CV_Assert(inp.isContinuous() && out.isContinuous());
CV_Assert(inp.type() == CV_32F && out.type() == CV_32F);
const float *srcData = inp.ptr<float>();
float *dstData = out.ptr<float>();
for (i = 0; i < count; ++i)
{
size_t oldPosition = 0;
size_t newPosition = i;
for (j = 0; j < numAxes; ++j)
{
oldPosition += (newPosition / newStride[j]) * oldStride[order[j]];
newPosition %= newStride[j];
}
dstData[i] = srcData[oldPosition];
}
}
}
}
size_t _count;
std::vector<size_t> _order;
std::vector<int> _oldDimensionSize;
std::vector<int> _newDimensionSize;
std::vector<size_t> _oldStride;
std::vector<size_t> _newStride;
bool _needsPermute;
size_t _numAxes;
};
Ptr<PermuteLayer> PermuteLayer::create(const LayerParams &params)
{
return Ptr<PermuteLayer>(new PermuteLayerImpl(params));
}
}
}

View File

@ -0,0 +1,566 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "../precomp.hpp"
#include "layers_common.hpp"
#include "opencv2/core/hal/intrin.hpp"
#include "op_halide.hpp"
#include <float.h>
#include <algorithm>
using std::max;
using std::min;
namespace cv
{
namespace dnn
{
//TODO: add ceil_mode param
class PoolingLayerImpl : public PoolingLayer
{
public:
PoolingLayerImpl(const LayerParams& params)
{
type = PoolingLayer::MAX;
computeMaxIdx = true;
if (params.has("pool"))
{
String pool = params.get<String>("pool").toLowerCase();
if (pool == "max")
type = PoolingLayer::MAX;
else if (pool == "ave")
type = PoolingLayer::AVE;
else if (pool == "stochastic")
type = PoolingLayer::STOCHASTIC;
else
CV_Error(Error::StsBadArg, "Unknown pooling type \"" + pool + "\"");
}
getPoolingKernelParams(params, kernel.height, kernel.width, globalPooling,
pad.height, pad.width, stride.height, stride.width, padMode);
setParamsFrom(params);
}
void finalize(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs)
{
CV_Assert(inputs.size() == 1);
cv::Size inp(inputs[0]->size[3], inputs[0]->size[2]),
out(outputs[0].size[3], outputs[0].size[2]);
if(globalPooling)
{
kernel = inp;
}
getConvPoolPaddings(inp, out, kernel, stride, padMode, pad);
}
virtual bool supportBackend(int backendId)
{
return backendId == DNN_BACKEND_DEFAULT ||
       (backendId == DNN_BACKEND_HALIDE && haveHalide() &&
        (type == PoolingLayer::MAX ||
         (type == PoolingLayer::AVE && !pad.width && !pad.height)));
}
void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
{
for (size_t ii = 0; ii < inputs.size(); ii++)
{
switch (type)
{
case MAX:
maxPooling(*inputs[ii], outputs[2 * ii], outputs[2 * ii + 1]);
break;
case AVE:
avePooling(*inputs[ii], outputs[ii]);
break;
default:
CV_Error(Error::StsNotImplemented, "Not implemented");
break;
}
}
}
virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &inputs)
{
if (type == PoolingLayer::MAX)
return initMaxPoolingHalide(inputs);
else if (type == PoolingLayer::AVE)
return initAvePoolingHalide(inputs);
else
return Ptr<BackendNode>();
}
class MaxPoolingInvoker : public ParallelLoopBody
{
public:
const Mat* src_;
Mat *dst_, *mask_;
Size kernel_, stride_, pad_;
int nstripes_;
bool computeMaxIdx_;
MaxPoolingInvoker(const Mat& src, Mat& dst, Mat& mask, Size kernel,
Size stride, Size pad, int nstripes, bool computeMaxIdx)
{
src_ = &src;
dst_ = &dst;
mask_ = &mask;
kernel_ = kernel;
stride_ = stride;
pad_ = pad;
nstripes_ = nstripes;
computeMaxIdx_ = computeMaxIdx;
CV_Assert(src.isContinuous() && dst.isContinuous() &&
src.type() == CV_32F && src.type() == dst.type() &&
mask.type() == src.type() && src.dims == 4 && dst.dims == 4 &&
src.size[0] == dst.size[0] && src.size[1] == dst.size[1] &&
mask.size == dst.size);
}
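// Each stripe starts at a flat offset into the NCHW output; the
// decomposition below recovers (n, c, y0, x0) from it. E.g. for a
// 1x2x4x4 output, offset 21 gives x0 = 1, y0 = 1, c = 1, n = 0
// (illustrative).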
void operator()(const Range& r) const
{
int nimgs = dst_->size[0], channels = dst_->size[1];
int width = dst_->size[3], height = dst_->size[2];
int inp_width = src_->size[3], inp_height = src_->size[2];
size_t total = dst_->total();
size_t stripeSize = (total + nstripes_ - 1)/nstripes_;
size_t stripeStart = r.start*stripeSize;
size_t stripeEnd = std::min(r.end*stripeSize, total);
size_t ofs = stripeStart;
int x0 = (int)(ofs % width);
ofs /= width;
int y0 = (int)(ofs % height);
ofs /= height;
int c = (int)(ofs % channels);
int n = (int)(ofs / channels);
const float *srcData = src_->ptr<float>(n, c);
float *dstData = dst_->ptr<float>(n, c, y0) + x0;
float *dstMaskData = mask_->ptr<float>(n, c, y0) + x0;
int kernel_w = kernel_.width, kernel_h = kernel_.height;
int pad_w = pad_.width, pad_h = pad_.height;
int stride_w = stride_.width, stride_h = stride_.height;
bool compMaxIdx = computeMaxIdx_;
#if CV_SIMD128
v_float32x4 idx00(0.f, (float)stride_w, (float)(stride_w*2), (float)(stride_w*3));
v_float32x4 ones = v_setall_f32(1.f);
v_float32x4 delta = v_setall_f32((float)(inp_width - kernel_w));
#endif
for( ofs = stripeStart; ofs < stripeEnd; ofs++ )
{
int ystart = y0 * stride_h - pad_h;
int xstart = x0 * stride_w - pad_w;
int yend = min(ystart + kernel_h, inp_height);
int xend = min(xstart + kernel_w, inp_width);
ystart = max(ystart, 0);
xstart = max(xstart, 0);
float max_val = -FLT_MAX;
int max_index = -1;
#if CV_SIMD128
if( xstart > 0 && (x0 + 7) * stride_w - pad_w + kernel_w < inp_width )
{
if( compMaxIdx )
{
v_float32x4 max_val0 = v_setall_f32(max_val);
v_float32x4 max_val1 = max_val0;
v_float32x4 max_idx0 = v_setall_f32(-1.f);
v_float32x4 max_idx1 = max_idx0;
int index0 = ystart * inp_width + xstart;
v_float32x4 idx0 = idx00 + v_setall_f32((float)index0);
v_float32x4 idx1 = idx0 + v_setall_f32((float)(stride_w*4));
for (int y = ystart; y < yend; ++y)
{
for (int x = xstart; x < xend; ++x, idx0 += ones, idx1 += ones)
{
const int index = y * inp_width + x;
v_float32x4 v0(srcData[index], srcData[index + stride_w],
srcData[index + stride_w*2], srcData[index + stride_w*3]);
v_float32x4 v1(srcData[index + stride_w*4], srcData[index + stride_w*5],
srcData[index + stride_w*6], srcData[index + stride_w*7]);
max_idx0 = v_select(v0 > max_val0, idx0, max_idx0);
max_idx1 = v_select(v1 > max_val1, idx1, max_idx1);
max_val0 = v_max(max_val0, v0);
max_val1 = v_max(max_val1, v1);
}
idx0 += delta;
idx1 += delta;
}
v_store(dstData, max_val0);
v_store(dstData + 4, max_val1);
v_store(dstMaskData, max_idx0);
v_store(dstMaskData + 4, max_idx1);
ofs += 7;
dstData += 8;
dstMaskData += 8;
x0 += 7;
}
else
{
v_float32x4 max_val0 = v_setall_f32(max_val);
v_float32x4 max_val1 = max_val0;
for (int y = ystart; y < yend; ++y)
{
for (int x = xstart; x < xend; ++x)
{
const int index = y * inp_width + x;
v_float32x4 v0(srcData[index], srcData[index + stride_w],
srcData[index + stride_w*2], srcData[index + stride_w*3]);
v_float32x4 v1(srcData[index + stride_w*4], srcData[index + stride_w*5],
srcData[index + stride_w*6], srcData[index + stride_w*7]);
max_val0 = v_max(max_val0, v0);
max_val1 = v_max(max_val1, v1);
}
}
v_store(dstData, max_val0);
v_store(dstData + 4, max_val1);
ofs += 7;
dstData += 8;
x0 += 7;
}
}
else
#endif
{
if( compMaxIdx )
{
for (int y = ystart; y < yend; ++y)
for (int x = xstart; x < xend; ++x)
{
const int index = y * inp_width + x;
float val = srcData[index];
if (val > max_val)
{
max_val = val;
max_index = index;
}
}
*dstData++ = max_val;
*dstMaskData++ = max_index;
}
else
{
for (int y = ystart; y < yend; ++y)
for (int x = xstart; x < xend; ++x)
{
const int index = y * inp_width + x;
float val = srcData[index];
max_val = std::max(max_val, val);
}
*dstData++ = max_val;
}
}
if( ++x0 >= width )
{
x0 = 0;
if( ++y0 >= height )
{
y0 = 0;
if( ++c >= channels )
{
c = 0;
if( ++n >= nimgs )
break;
}
srcData = src_->ptr<float>(n, c);
}
}
}
}
};
void maxPooling(Mat &src, Mat &dst, Mat &mask)
{
const int nstripes = getNumThreads();
MaxPoolingInvoker mp(src, dst, mask, kernel, stride, pad, nstripes, computeMaxIdx);
parallel_for_(Range(0, nstripes), mp, nstripes);
}
void avePooling(Mat &src, Mat &dst)
{
Size inp(src.size[3], src.size[2]),
out(dst.size[3], dst.size[2]);
for (int n = 0; n < src.size[0]; ++n)
{
for (int c = 0; c < src.size[1]; ++c)
{
const float *srcData = src.ptr<float>(n, c);
float *dstData = dst.ptr<float>(n, c);
for (int ph = 0; ph < out.height; ++ph)
{
for (int pw = 0; pw < out.width; ++pw)
{
int hstart = ph * stride.height - pad.height;
int wstart = pw * stride.width - pad.width;
int hend = min(hstart + kernel.height, inp.height + pad.height);
int wend = min(wstart + kernel.width, inp.width + pad.width);
int poolSize = (hend - hstart) * (wend - wstart);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
hend = min(hend, inp.height);
wend = min(wend, inp.width);
dstData[ph * out.width + pw] = 0.f;
for (int h = hstart; h < hend; ++h)
for (int w = wstart; w < wend; ++w)
dstData[ph * out.width + pw] += srcData[h * inp.width + w];
dstData[ph * out.width + pw] /= poolSize;
}
}
}
}
}
virtual Ptr<BackendNode> initMaxPoolingHalide(const std::vector<Ptr<BackendWrapper> > &inputs)
{
#ifdef HAVE_HALIDE
Halide::Buffer<float> inputBuffer = halideBuffer(inputs[0]);
const int inWidth = inputBuffer.width();
const int inHeight = inputBuffer.height();
Halide::Var x("x"), y("y"), c("c"), n("n");
Halide::Func top = (name.empty() ? Halide::Func() : Halide::Func(name));
Halide::RDom r(0, kernel.width, 0, kernel.height);
Halide::Expr kx, ky;
if (pad.width || pad.height)
{
kx = clamp(x * stride.width + r.x - pad.width, 0, inWidth - 1);
ky = clamp(y * stride.height + r.y - pad.height, 0, inHeight - 1);
}
else
{
kx = min(x * stride.width + r.x, inWidth - 1);
ky = min(y * stride.height + r.y, inHeight - 1);
}
// Halide::argmax returns tuple (r.x, r.y, max).
Halide::Tuple res = argmax(inputBuffer(kx, ky, c, n));
// Convert the in-kernel argmax offsets (in [0, kernel_size)) into a flat index within the input plane.
Halide::Expr max_index;
if (pad.width || pad.height)
{
max_index = clamp(y * stride.height + res[1] - pad.height,
0, inHeight - 1) * inWidth +
clamp(x * stride.width + res[0] - pad.width,
0, inWidth - 1);
}
else
{
max_index = min(y * stride.height + res[1], inHeight - 1) * inWidth +
min(x * stride.width + res[0], inWidth - 1);
}
top(x, y, c, n) = { res[2], Halide::cast<float>(max_index) };
return Ptr<BackendNode>(new HalideBackendNode(top));
#endif // HAVE_HALIDE
return Ptr<BackendNode>();
}
virtual Ptr<BackendNode> initAvePoolingHalide(const std::vector<Ptr<BackendWrapper> > &inputs)
{
#ifdef HAVE_HALIDE
Halide::Buffer<float> inputBuffer = halideBuffer(inputs[0]);
const int inW = inputBuffer.width(), inH = inputBuffer.height();
if ((inW - kernel.width) % stride.width || (inH - kernel.height) % stride.height)
{
CV_Error(cv::Error::StsNotImplemented,
"Halide backend for average pooling with partial "
"kernels is not implemented");
}
const float norm = 1.0f / (kernel.width * kernel.height);
Halide::Var x("x"), y("y"), c("c"), n("n");
Halide::Func top = (name.empty() ? Halide::Func() : Halide::Func(name));
Halide::RDom r(0, kernel.width, 0, kernel.height);
top(x, y, c, n) = sum(
inputBuffer(x * stride.width + r.x,
y * stride.height + r.y, c, n)) * norm;
return Ptr<BackendNode>(new HalideBackendNode(top));
#endif // HAVE_HALIDE
return Ptr<BackendNode>();
}
virtual void applyHalideScheduler(Ptr<BackendNode>& node,
const std::vector<Mat*> &inputs,
const std::vector<Mat> &outputs,
int targetId) const
{
#ifdef HAVE_HALIDE
if (targetId != DNN_TARGET_CPU)
{
Layer::applyHalideScheduler(node, inputs, outputs, targetId);
return;
}
Halide::Var x("x"), y("y"), c("c"), n("n"), tile("tile"),
xi("xi"), yi("yi"), ci("ci"), xo("xo"), yo("yo"), co("co");
Halide::Func& top = node.dynamicCast<HalideBackendNode>()->funcs.back();
int outW, outH, outC, outN;
getCanonicalSize(outputs[0].size, &outW, &outH, &outC, &outN);
if (outW < 8 || outH < 8)
{
if (outC > 8)
top.split(c, co, ci, 8)
.fuse(x, y, tile).fuse(co, tile, tile).fuse(n, tile, tile)
.parallel(tile)
.vectorize(ci);
else
{
top.fuse(y, c, tile).fuse(n, tile, tile)
.parallel(tile);
if (outW > 1)
top.vectorize(x);
}
}
else
{
if (outC > 8)
top.split(x, xo, xi, 8).split(y, yo, yi, 8).split(c, co, ci, 8)
.fuse(xo, yo, tile).fuse(co, tile, tile).fuse(n, tile, tile)
.parallel(tile)
.vectorize(xi);
else
top.split(x, xo, xi, 8).split(y, yo, yi, 8)
.fuse(xo, yo, tile).fuse(c, tile, tile).fuse(n, tile, tile)
.parallel(tile)
.vectorize(xi);
}
#endif // HAVE_HALIDE
}
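// Output size follows Caffe's ceil-mode convention, e.g. (illustrative):
// in = 7, kernel = 3, stride = 2, pad = 0 gives
//   out = ceil((7 + 0 - 3) / 2) + 1 = 3,
// and with padding the last window is clipped back so that pooling
// always starts strictly inside the padded image.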
bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
std::vector<MatShape> &internals) const
{
CV_Assert(inputs.size() != 0);
Size in(inputs[0][3], inputs[0][2]), out;
if (globalPooling)
{
out.height = 1;
out.width = 1;
}
else if (padMode.empty())
{
// Caffe computes the output size with ceil-mode rounding here.
out.height = static_cast<int>(ceil(static_cast<float>(in.height + 2 * pad.height -
kernel.height) / stride.height)) + 1;
out.width = static_cast<int>(ceil(static_cast<float>(in.width + 2 * pad.width -
kernel.width) / stride.width)) + 1;
if (pad.height || pad.width)
{
// If we have padding, ensure that the last pooling starts strictly
// inside the image (instead of at the padding); otherwise clip the last.
if ((out.height - 1) * stride.height >= in.height + pad.height)
--out.height;
if ((out.width - 1) * stride.width >= in.width + pad.width)
--out.width;
CV_Assert((out.height - 1) * stride.height < in.height + pad.height);
CV_Assert((out.width - 1) * stride.width < in.width + pad.width);
}
}
else
{
getConvPoolOutParams(in, kernel, stride,
padMode, out);
}
outputs.resize(type == MAX ? 2 * inputs.size() : inputs.size());
for (size_t i = 0; i < inputs.size(); i++)
{
size_t index = type == MAX ? 2*i : i;
int dims[] = {inputs[i][0], inputs[i][1], out.height, out.width};
outputs[index] = shape(dims);
if (type == MAX)
outputs[index + 1] = shape(dims);
}
return false;
}
virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
const std::vector<MatShape> &outputs) const
{
(void)inputs; // suppress unused variable warning
long flops = 0;
for(int i = 0; i < outputs.size(); i++)
{
if (type == MAX)
{
if (i%2 == 0)
flops += total(outputs[i])*kernel.area();
}
else
{
flops += total(outputs[i])*(kernel.area() + 1);
}
}
return flops;
}
};
Ptr<PoolingLayer> PoolingLayer::create(const LayerParams& params)
{
return Ptr<PoolingLayer>(new PoolingLayerImpl(params));
}
}
}

View File

@ -0,0 +1,379 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "../precomp.hpp"
#include "layers_common.hpp"
#include <float.h>
#include <algorithm>
#include <cmath>
namespace cv
{
namespace dnn
{
class PriorBoxLayerImpl : public PriorBoxLayer
{
public:
bool getParameterDict(const LayerParams &params,
const std::string &parameterName,
DictValue& result)
{
if (!params.has(parameterName))
{
return false;
}
result = params.get(parameterName);
return true;
}
template<typename T>
T getParameter(const LayerParams &params,
const std::string &parameterName,
const size_t &idx=0,
const bool required=true,
const T& defaultValue=T())
{
DictValue dictValue;
bool success = getParameterDict(params, parameterName, dictValue);
if(!success)
{
if(required)
{
std::string message = _layerName;
message += " layer parameter does not contain ";
message += parameterName;
message += " parameter.";
CV_Error(Error::StsBadArg, message);
}
else
{
return defaultValue;
}
}
return dictValue.get<T>(idx);
}
void getAspectRatios(const LayerParams &params)
{
DictValue aspectRatioParameter;
bool aspectRatioRetrieved = getParameterDict(params, "aspect_ratio", aspectRatioParameter);
CV_Assert(aspectRatioRetrieved);
for (int i = 0; i < aspectRatioParameter.size(); ++i)
{
float aspectRatio = aspectRatioParameter.get<float>(i);
bool alreadyExists = false;
for (size_t j = 0; j < _aspectRatios.size(); ++j)
{
if (fabs(aspectRatio - _aspectRatios[j]) < 1e-6)
{
alreadyExists = true;
break;
}
}
if (!alreadyExists)
{
_aspectRatios.push_back(aspectRatio);
if (_flip)
{
_aspectRatios.push_back(1./aspectRatio);
}
}
}
}
void getVariance(const LayerParams &params)
{
DictValue varianceParameter;
bool varianceParameterRetrieved = getParameterDict(params, "variance", varianceParameter);
CV_Assert(varianceParameterRetrieved);
int varianceSize = varianceParameter.size();
if (varianceSize > 1)
{
// Exactly four variance values must be provided.
CV_Assert(varianceSize == 4);
for (int i = 0; i < varianceSize; ++i)
{
float variance = varianceParameter.get<float>(i);
CV_Assert(variance > 0);
_variance.push_back(variance);
}
}
else
{
if (varianceSize == 1)
{
float variance = varianceParameter.get<float>(0);
CV_Assert(variance > 0);
_variance.push_back(variance);
}
else
{
// Set default to 0.1.
_variance.push_back(0.1f);
}
}
}
PriorBoxLayerImpl(const LayerParams &params)
{
setParamsFrom(params);
_minSize = getParameter<unsigned>(params, "min_size");
CV_Assert(_minSize > 0);
_flip = getParameter<bool>(params, "flip");
_clip = getParameter<bool>(params, "clip");
_aspectRatios.clear();
_aspectRatios.push_back(1.);
getAspectRatios(params);
getVariance(params);
_numPriors = _aspectRatios.size();
_maxSize = -1;
if (params.has("max_size"))
{
_maxSize = params.get("max_size").get<float>(0);
CV_Assert(_maxSize > _minSize);
_numPriors += 1;
}
if (params.has("step_h") || params.has("step_w")) {
CV_Assert(!params.has("step"));
_stepY = getParameter<float>(params, "step_h");
CV_Assert(_stepY > 0.);
_stepX = getParameter<float>(params, "step_w");
CV_Assert(_stepX > 0.);
} else if (params.has("step")) {
const float step = getParameter<float>(params, "step");
CV_Assert(step > 0);
_stepY = step;
_stepX = step;
} else {
_stepY = 0;
_stepX = 0;
}
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
std::vector<MatShape> &internals) const
{
CV_Assert(inputs.size() == 2);
int layerHeight = inputs[0][2];
int layerWidth = inputs[0][3];
// Since all images in a batch have the same height and width, we only need to
// generate one set of priors which can be shared across all images.
size_t outNum = 1;
// 2 channels. First channel stores the mean of each prior coordinate.
// Second channel stores the variance of each prior coordinate.
size_t outChannels = 2;
outputs.resize(1, shape(outNum, outChannels,
layerHeight * layerWidth * _numPriors * 4));
return false;
}
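// Worked example (illustrative): a 2x2 feature map over a 300x300 image
// with min_size = 30 and no explicit step gives stepX = stepY = 150, so
// the first cell's prior is centered at (75, 75) and written as
//   xmin = (75 - 15)/300 = 0.2, ymin = 0.2, xmax = 0.3, ymax = 0.3.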
void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
{
int _layerWidth = inputs[0]->size[3];
int _layerHeight = inputs[0]->size[2];
int _imageWidth = inputs[1]->size[3];
int _imageHeight = inputs[1]->size[2];
float stepX, stepY;
if (_stepX == 0 || _stepY == 0) {
stepX = static_cast<float>(_imageWidth) / _layerWidth;
stepY = static_cast<float>(_imageHeight) / _layerHeight;
} else {
stepX = _stepX;
stepY = _stepY;
}
int _outChannelSize = _layerHeight * _layerWidth * _numPriors * 4;
float* outputPtr = outputs[0].ptr<float>();
// first prior: aspect_ratio = 1, size = min_size
int idx = 0;
for (size_t h = 0; h < _layerHeight; ++h)
{
for (size_t w = 0; w < _layerWidth; ++w)
{
_boxWidth = _boxHeight = _minSize;
float center_x = (w + 0.5) * stepX;
float center_y = (h + 0.5) * stepY;
// xmin
outputPtr[idx++] = (center_x - _boxWidth / 2.) / _imageWidth;
// ymin
outputPtr[idx++] = (center_y - _boxHeight / 2.) / _imageHeight;
// xmax
outputPtr[idx++] = (center_x + _boxWidth / 2.) / _imageWidth;
// ymax
outputPtr[idx++] = (center_y + _boxHeight / 2.) / _imageHeight;
if (_maxSize > 0)
{
// second prior: aspect_ratio = 1, size = sqrt(min_size * max_size)
_boxWidth = _boxHeight = sqrt(_minSize * _maxSize);
// xmin
outputPtr[idx++] = (center_x - _boxWidth / 2.) / _imageWidth;
// ymin
outputPtr[idx++] = (center_y - _boxHeight / 2.) / _imageHeight;
// xmax
outputPtr[idx++] = (center_x + _boxWidth / 2.) / _imageWidth;
// ymax
outputPtr[idx++] = (center_y + _boxHeight / 2.) / _imageHeight;
}
// rest of priors
for (size_t r = 0; r < _aspectRatios.size(); ++r)
{
float ar = _aspectRatios[r];
if (fabs(ar - 1.) < 1e-6)
{
continue;
}
_boxWidth = _minSize * sqrt(ar);
_boxHeight = _minSize / sqrt(ar);
// xmin
outputPtr[idx++] = (center_x - _boxWidth / 2.) / _imageWidth;
// ymin
outputPtr[idx++] = (center_y - _boxHeight / 2.) / _imageHeight;
// xmax
outputPtr[idx++] = (center_x + _boxWidth / 2.) / _imageWidth;
// ymax
outputPtr[idx++] = (center_y + _boxHeight / 2.) / _imageHeight;
}
}
}
// clip the priors' coordinates so that they are within [0, 1]
if (_clip)
{
for (size_t d = 0; d < _outChannelSize; ++d)
{
outputPtr[d] = std::min<float>(std::max<float>(outputPtr[d], 0.), 1.);
}
}
// set the variance.
outputPtr = outputs[0].ptr<float>(0, 1);
if(_variance.size() == 1)
{
Mat secondChannel(outputs[0].size[2], outputs[0].size[3], CV_32F, outputPtr);
secondChannel.setTo(Scalar(_variance[0]));
}
else
{
int count = 0;
for (size_t h = 0; h < _layerHeight; ++h)
{
for (size_t w = 0; w < _layerWidth; ++w)
{
for (size_t i = 0; i < _numPriors; ++i)
{
for (int j = 0; j < 4; ++j)
{
outputPtr[count] = _variance[j];
++count;
}
}
}
}
}
}
virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
const std::vector<MatShape> &outputs) const
{
(void)outputs; // suppress unused variable warning
long flops = 0;
for (int i = 0; i < inputs.size(); i++)
{
flops += total(inputs[i], 2) * _numPriors * 4;
}
return flops;
}
float _minSize;
float _maxSize;
float _boxWidth;
float _boxHeight;
float _stepX, _stepY;
std::vector<float> _aspectRatios;
std::vector<float> _variance;
bool _flip;
bool _clip;
size_t _numPriors;
static const size_t _numAxes = 4;
static const std::string _layerName;
};
const std::string PriorBoxLayerImpl::_layerName = std::string("PriorBox");
Ptr<PriorBoxLayer> PriorBoxLayer::create(const LayerParams &params)
{
return Ptr<PriorBoxLayer>(new PriorBoxLayerImpl(params));
}
}
}

View File

@ -0,0 +1,443 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "../precomp.hpp"
#include <iostream>
#include <iterator>
#include <cmath>
#include <opencv2/dnn/shape_utils.hpp>
namespace cv
{
namespace dnn
{
template<typename Dtype>
static void tanh(const Mat &src, Mat &dst)
{
MatConstIterator_<Dtype> itSrc = src.begin<Dtype>();
MatIterator_<Dtype> itDst = dst.begin<Dtype>();
for (; itSrc != src.end<Dtype>(); itSrc++, itDst++)
*itDst = std::tanh(*itSrc);
}
//TODO: make utils method
static void tanh(const Mat &src, Mat &dst)
{
dst.create(src.dims, (const int*)src.size, src.type());
if (src.type() == CV_32F)
tanh<float>(src, dst);
else if (src.type() == CV_64F)
tanh<double>(src, dst);
else
CV_Error(Error::StsUnsupportedFormat, "Function supports only floating point types");
}
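// Elementwise logistic function: dst = 1 / (1 + exp(-src)).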
static void sigmoid(const Mat &src, Mat &dst)
{
cv::exp(-src, dst);
cv::pow(1 + dst, -1, dst);
}
class LSTMLayerImpl : public LSTMLayer
{
int numTimeStamps, numSamples;
bool allocated;
MatShape outTailShape; //shape of single output sample
MatShape outTsShape; //shape of N output samples
bool useTimestampDim;
bool produceCellOutput;
public:
LSTMLayerImpl(const LayerParams& params)
{
setParamsFrom(params);
type = "LSTM";
useTimestampDim = true;
produceCellOutput = false;
allocated = false;
outTailShape.clear();
}
void setUseTimstampsDim(bool use)
{
CV_Assert(!allocated);
useTimestampDim = use;
}
void setProduceCellOutput(bool produce)
{
CV_Assert(!allocated);
produceCellOutput = produce;
}
void setOutShape(const MatShape &outTailShape_)
{
CV_Assert(!allocated || total(outTailShape) == total(outTailShape_));
outTailShape = outTailShape_;
}
void setWeights(const Mat &Wh, const Mat &Wx, const Mat &bias)
{
CV_Assert(Wh.dims == 2 && Wx.dims == 2);
CV_Assert(Wh.rows == Wx.rows);
CV_Assert(Wh.rows == 4*Wh.cols);
CV_Assert(Wh.rows == (int)bias.total());
CV_Assert(Wh.type() == Wx.type() && Wx.type() == bias.type());
blobs.resize(3);
blobs[0] = Mat(Wh.clone());
blobs[1] = Mat(Wx.clone());
blobs[2] = Mat(bias.clone()).reshape(1, 1);
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
std::vector<MatShape> &internals) const
{
CV_Assert(blobs.size() == 3);
CV_Assert(inputs.size() == 1);
const MatShape& inp0 = inputs[0];
const Mat &Wh = blobs[0], &Wx = blobs[1];
int _numOut = Wh.size[1];
int _numInp = Wx.size[1];
MatShape outTailShape_(outTailShape), outResShape;
if (!outTailShape_.empty())
CV_Assert(total(outTailShape_) == _numOut);
else
outTailShape_.assign(1, _numOut);
int _numTimeStamps, _numSamples;
if (useTimestampDim)
{
CV_Assert(inp0.size() >= 2 && total(inp0, 2) == _numInp);
_numTimeStamps = inp0[0];
_numSamples = inp0[1];
outResShape.push_back(_numTimeStamps);
}
else
{
CV_Assert(inp0.size() >= 2 && total(inp0, 1) == _numInp);
_numTimeStamps = 1;
_numSamples = inp0[0];
}
outResShape.push_back(_numSamples);
outResShape.insert(outResShape.end(), outTailShape_.begin(), outTailShape_.end());
size_t noutputs = produceCellOutput ? 2 : 1;
outputs.assign(noutputs, outResShape);
internals.assign(1, shape(_numSamples, _numOut)); // hInternal
internals.push_back(shape(_numSamples, _numOut)); // cInternal
internals.push_back(shape(_numSamples, 1)); // dummyOnes
internals.push_back(shape(_numSamples, 4*_numOut)); // gates
return false;
}
void finalize(const std::vector<Mat*> &input, std::vector<Mat> &output)
{
CV_Assert(blobs.size() == 3);
CV_Assert(input.size() == 1);
const Mat& inp0 = *input[0];
Mat &Wh = blobs[0], &Wx = blobs[1];
int numOut = Wh.size[1];
int numInp = Wx.size[1];
if (!outTailShape.empty())
CV_Assert(total(outTailShape) == numOut);
else
outTailShape.assign(1, numOut);
if (useTimestampDim)
{
CV_Assert(inp0.dims >= 2 && (int)inp0.total(2) == numInp);
numTimeStamps = inp0.size[0];
numSamples = inp0.size[1];
}
else
{
CV_Assert(inp0.dims >= 2 && (int)inp0.total(1) == numInp);
numTimeStamps = 1;
numSamples = inp0.size[0];
}
outTsShape.clear();
outTsShape.push_back(numSamples);
outTsShape.insert(outTsShape.end(), outTailShape.begin(), outTailShape.end());
allocated = true;
}
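// Per-timestep recurrence computed by forward() (standard LSTM, for reference):
//   [i f o g] = x_t * Wx^T + h_{t-1} * Wh^T + b
//   i, f, o = sigmoid(.),  g = tanh(.)
//   c_t = f (*) c_{t-1} + i (*) g
//   h_t = o (*) tanh(c_t)
// where (*) denotes elementwise multiplication.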
void forward(std::vector<Mat*> &input, std::vector<Mat> &output, std::vector<Mat> &internals)
{
const Mat &Wh = blobs[0];
const Mat &Wx = blobs[1];
const Mat &bias = blobs[2];
int numOut = Wh.size[1];
Mat hInternal = internals[0], cInternal = internals[1],
dummyOnes = internals[2], gates = internals[3];
hInternal.setTo(0.);
cInternal.setTo(0.);
dummyOnes.setTo(1.);
int numSamplesTotal = numTimeStamps*numSamples;
Mat xTs = input[0]->reshape(1, numSamplesTotal);
Mat hOutTs = output[0].reshape(1, numSamplesTotal);
Mat cOutTs = produceCellOutput ? output[1].reshape(1, numSamplesTotal) : Mat();
for (int ts = 0; ts < numTimeStamps; ts++)
{
Range curRowRange(ts*numSamples, (ts + 1)*numSamples);
Mat xCurr = xTs.rowRange(curRowRange);
gemm(xCurr, Wx, 1, gates, 0, gates, GEMM_2_T); // Wx * x_t
gemm(hInternal, Wh, 1, gates, 1, gates, GEMM_2_T); //+Wh * h_{t-1}
gemm(dummyOnes, bias, 1, gates, 1, gates); //+b
Mat gatesIFO = gates.colRange(0, 3*numOut);
Mat gateI = gates.colRange(0*numOut, 1*numOut);
Mat gateF = gates.colRange(1*numOut, 2*numOut);
Mat gateO = gates.colRange(2*numOut, 3*numOut);
Mat gateG = gates.colRange(3*numOut, 4*numOut);
sigmoid(gatesIFO, gatesIFO);
tanh(gateG, gateG);
//compute c_t
multiply(gateF, cInternal, gateF); // f_t (*) c_{t-1}
multiply(gateI, gateG, gateI); // i_t (*) g_t
add(gateF, gateI, cInternal); // c_t = f_t (*) c_{t-1} + i_t (*) g_t
//compute h_t
tanh(cInternal, hInternal);
multiply(gateO, hInternal, hInternal);
//save results in output blobs
hInternal.copyTo(hOutTs.rowRange(curRowRange));
if (produceCellOutput)
cInternal.copyTo(cOutTs.rowRange(curRowRange));
}
}
};
Ptr<LSTMLayer> LSTMLayer::create(const LayerParams& params)
{
return Ptr<LSTMLayer>(new LSTMLayerImpl(params));
}
int LSTMLayer::inputNameToIndex(String inputName)
{
if (inputName.toLowerCase() == "x")
return 0;
return -1;
}
int LSTMLayer::outputNameToIndex(String outputName)
{
if (outputName.toLowerCase() == "h")
return 0;
else if (outputName.toLowerCase() == "c")
return 1;
return -1;
}
class RNNLayerImpl : public RNNLayer
{
int numX, numH, numO;
int numSamples, numTimestamps, numSamplesTotal;
int dtype;
Mat Whh, Wxh, bh;
Mat Who, bo;
bool produceH;
public:
RNNLayerImpl(const LayerParams& params)
{
setParamsFrom(params);
type = "RNN";
produceH = false;
}
void setProduceHiddenOutput(bool produce = false)
{
produceH = produce;
}
void setWeights(const Mat &W_xh, const Mat &b_h, const Mat &W_hh, const Mat &W_ho, const Mat &b_o)
{
CV_Assert(W_hh.dims == 2 && W_xh.dims == 2);
CV_Assert(W_hh.size[0] == W_xh.size[0] && W_hh.size[0] == W_hh.size[1] && (int)b_h.total() == W_xh.size[0]);
CV_Assert(W_ho.size[0] == (int)b_o.total());
CV_Assert(W_ho.size[1] == W_hh.size[1]);
blobs.resize(5);
blobs[0] = Mat(W_xh.clone());
blobs[1] = Mat(b_h.clone());
blobs[2] = Mat(W_hh.clone());
blobs[3] = Mat(W_ho.clone());
blobs[4] = Mat(b_o.clone());
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
std::vector<MatShape> &internals) const
{
CV_Assert(inputs.size() >= 1 && inputs.size() <= 2);
Mat Who_ = blobs[3];
Mat Wxh_ = blobs[0];
int numTimestamps_ = inputs[0][0];
int numSamples_ = inputs[0][1];
int numO_ = Who_.rows;
int numH_ = Wxh_.rows;
outputs.clear();
int dims[] = {numTimestamps_, numSamples_, numO_};
outputs.push_back(shape(dims, 3));
dims[2] = numH_;
if (produceH)
outputs.push_back(shape(dims, 3));
internals.assign(2, shape(numSamples_, numH_));
internals.push_back(shape(numSamples_, 1));
return false;
}
void finalize(const std::vector<Mat*> &input, std::vector<Mat> &output)
{
CV_Assert(input.size() >= 1 && input.size() <= 2);
Wxh = blobs[0];
bh = blobs[1];
Whh = blobs[2];
Who = blobs[3];
bo = blobs[4];
numH = Wxh.rows;
numX = Wxh.cols;
numO = Who.rows;
const Mat& inp0 = *input[0];
CV_Assert(inp0.dims >= 2);
CV_Assert(inp0.total(2) == numX);
dtype = CV_32F;
CV_Assert(inp0.type() == dtype);
numTimestamps = inp0.size[0];
numSamples = inp0.size[1];
numSamplesTotal = numTimestamps * numSamples;
bh = bh.reshape(1, 1); //is 1 x numH Mat
bo = bo.reshape(1, 1); //is 1 x numO Mat
}
void reshapeOutput(std::vector<Mat> &output)
{
output.resize(produceH ? 2 : 1);
int sz0[] = { numTimestamps, numSamples, numO };
output[0].create(3, sz0, dtype);
if (produceH)
{
int sz1[] = { numTimestamps, numSamples, numH };
output[1].create(3, sz1, dtype);
}
}
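// Per-timestep recurrence computed by forward() (vanilla Elman RNN, for reference):
//   h_t = tanh(W_hh * h_{t-1} + W_xh * x_t + b_h)
//   o_t = tanh(W_ho * h_t + b_o)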
void forward(std::vector<Mat*> &input, std::vector<Mat> &output, std::vector<Mat> &internals)
{
Mat xTs = input[0]->reshape(1, numSamplesTotal);
Mat oTs = output[0].reshape(1, numSamplesTotal);
Mat hTs = produceH ? output[1].reshape(1, numSamplesTotal) : Mat();
Mat hCurr = internals[0];
Mat hPrev = internals[1];
Mat dummyBiasOnes = internals[2];
hPrev.setTo(0.);
dummyBiasOnes.setTo(1.);
for (int ts = 0; ts < numTimestamps; ts++)
{
Range curRowRange = Range(ts * numSamples, (ts + 1) * numSamples);
Mat xCurr = xTs.rowRange(curRowRange);
gemm(hPrev, Whh, 1, hCurr, 0, hCurr, GEMM_2_T); // W_{hh} * h_{prev}
gemm(xCurr, Wxh, 1, hCurr, 1, hCurr, GEMM_2_T); //+W_{xh} * x_{curr}
gemm(dummyBiasOnes, bh, 1, hCurr, 1, hCurr); //+bh
tanh(hCurr, hPrev);
Mat oCurr = oTs.rowRange(curRowRange);
gemm(hPrev, Who, 1, oCurr, 0, oCurr, GEMM_2_T); // W_{ho} * h_{prev}
gemm(dummyBiasOnes, bo, 1, oCurr, 1, oCurr); //+b_o
tanh(oCurr, oCurr);
if (produceH)
hPrev.copyTo(hTs.rowRange(curRowRange));
}
}
};
CV_EXPORTS_W Ptr<RNNLayer> RNNLayer::create(const LayerParams& params)
{
return Ptr<RNNLayer>(new RNNLayerImpl(params));
}
}
}

View File

@ -0,0 +1,247 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "../precomp.hpp"
#include "layers_common.hpp"
#include <opencv2/dnn/shape_utils.hpp>
namespace cv
{
namespace dnn
{
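// Mask semantics (Caffe-style; illustrative examples):
//   dim  > 0  -- explicit output size,
//   dim == 0  -- copy the corresponding source dimension,
//   dim == -1 -- infer this dimension from the remaining total.
// E.g. reshaping 1x128x4x4 with mask [0, -1] yields 1x2048.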
static void computeShapeByReshapeMask(const MatShape &srcShape,
const MatShape &maskShape,
Range srcRange /*= Range::all()*/,
MatShape& dstShape)
{
int srcShapeSize = (int)srcShape.size();
int maskShapeSize = (int)maskShape.size();
if (srcRange == Range::all())
srcRange = Range(0, srcShapeSize);
else
{
int sz = srcRange.size();
srcRange.start = clamp(srcRange.start, srcShapeSize);
srcRange.end = srcRange.end == INT_MAX ? srcShapeSize : srcRange.start + sz;
}
bool explicitMask = !maskShape.empty(); // All mask values are positive.
for (int i = 0, n = maskShape.size(); i < n && explicitMask; ++i)
{
explicitMask = maskShape[i] > 0;
}
// Working range of source shape is a range where area(src) == area(mask).
if (explicitMask)
{
int maskTotal = total(maskShape);
for (int i = srcRange.start + 1; i < srcRange.end; ++i)
{
if (total(srcShape, i, srcRange.end) != maskTotal)
{
srcRange.start = i - 1;
break;
}
}
CV_Assert(total(srcShape, srcRange.start, srcRange.end) == maskTotal);
}
CV_Assert(0 <= srcRange.start && srcRange.start <= srcRange.end && srcRange.end <= srcShapeSize);
int dstShapeSize = srcShapeSize - srcRange.size() + maskShapeSize;
dstShape.resize(dstShapeSize);
std::copy(srcShape.begin(), srcShape.begin() + srcRange.start, dstShape.begin());
std::copy(srcShape.begin() + srcRange.end, srcShape.begin() + srcShapeSize, dstShape.begin() + srcRange.start + maskShapeSize);
int inferDim = -1;
for (int i = 0; i < maskShapeSize; i++)
{
if (maskShape[i] > 0)
{
dstShape[srcRange.start + i] = maskShape[i];
}
else if (maskShape[i] == 0)
{
if (srcRange.start + i >= srcShapeSize)
CV_Error(Error::StsBadArg, format("Copy dim[%d] (which has zero size) is out of the source shape bounds", srcRange.start + i));
dstShape[srcRange.start + i] = srcShape[srcRange.start + i];
}
else if (maskShape[i] == -1)
{
if (inferDim != -1)
CV_Error(Error::StsAssert, "Only one dimension may be inferred (denoted by -1)");
inferDim = srcRange.start + i;
dstShape[inferDim] = 1;
}
else
CV_Error(Error::StsBadArg, "maskShape[i] >= -1");
}
size_t srcTotal = total(srcShape);
size_t dstTotal = total(dstShape);
if (inferDim != -1)
{
if (srcTotal % dstTotal != 0)
CV_Error(Error::StsBackTrace, "Can't infer a dim denoted by -1");
dstShape[inferDim] = (int)(srcTotal / dstTotal);
}
else
{
CV_Assert(srcTotal == dstTotal);
}
}
class ReshapeLayerImpl : public ReshapeLayer
{
public:
ReshapeLayerImpl(const LayerParams& params):
performReordering(false)
{
setParamsFrom(params);
int axis = params.get<int>("axis", 0);
int numAxes = params.get<int>("num_axes", -1);
enableReordering = params.get<bool>("reorder_dims", false);
CV_Assert(numAxes >= -1);
newShapeRange = (numAxes == -1) ? Range(axis, INT_MAX) : Range(axis, axis + numAxes);
newShapeDesc.clear();
if (params.has("dim"))
{
const DictValue &paramShape = params.get("dim");
int i, dims = paramShape.size();
newShapeDesc.resize(dims);
for (i = 0; i < dims; i++)
newShapeDesc[i] = paramShape.get<int>(i);
}
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
std::vector<MatShape> &internals) const
{
outputs.clear();
for (size_t i = 0; i < inputs.size(); i++)
{
outputs.push_back(MatShape());
computeShapeByReshapeMask(inputs[i], newShapeDesc, newShapeRange, outputs.back());
}
internals = outputs;
return true;
}
void finalize(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs)
{
CV_Assert(inputs.size());
CV_Assert(outputs.size());
Mat srcBlob = *inputs[0];
int dims = srcBlob.dims;
MatShape inputShape = shape(srcBlob), outShape = shape(outputs[0]);
// input.total() == output.total(). So if reordering is required,
// at least one of the corresponding sizes will differ.
// Example where reordering IS required: from 1x128x4x4 to 1x2048.
// Example where reordering is NOT required: from 1x1024x1x1 to 1x1024.
bool reorderingRequired = false;
const int minDims = min(dims, (int)outShape.size());
for (int i = 0; !reorderingRequired && i < minDims; ++i)
reorderingRequired = inputShape[i] != outShape[i];
performReordering = enableReordering && reorderingRequired;
}
void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
{
for (size_t i = 0; i < inputs.size(); i++)
{
Mat srcBlob = *inputs[i];
MatShape inputShape = shape(srcBlob), outShape = shape(outputs[i]);
if (performReordering)
{
float *dstData = internals[i].ptr<float>();
const float *srcData = srcBlob.ptr<float>();
int num = inputShape[0], channels = inputShape[1], height = inputShape[2], width = inputShape[3];
int total = num*channels*height*width;
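// The nested loops below permute the blob from NCHW to NHWC order:
// dst index = ((i_n*height + i_h)*width + i_w)*channels + i_c.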
for(int i_n = 0; i_n < num; i_n++) {
for(int i_c = 0; i_c < channels; i_c++) {
for(int i_h = 0; i_h < height; i_h++) {
for(int i_w = 0; i_w < width; i_w++) {
int src_i = channels*height*width*i_n + height*width*i_c + width*i_h + i_w;
int dst_i = channels*height*width*i_n + i_c + channels*width*i_h + channels*i_w;
CV_Assert(dst_i < total);
CV_Assert(src_i < total);
dstData[dst_i] = srcData[src_i];
}
}
}
}
internals[i].copyTo(outputs[i]);
}
else
{
if (outputs[i].data != srcBlob.data)
srcBlob.reshape(1, outShape).copyTo(outputs[i]);
}
}
}
private:
std::vector<std::vector<int> > outShapes;
bool enableReordering, performReordering;
};
Ptr<ReshapeLayer> ReshapeLayer::create(const LayerParams& params)
{
return Ptr<ReshapeLayer>(new ReshapeLayerImpl(params));
}
}
}

View File

@ -0,0 +1,147 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
// Copyright (C) 2016, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
/*
Implementation of Scale layer.
*/
#include "../precomp.hpp"
#include "layers_common.hpp"
#include "op_halide.hpp"
#include <opencv2/dnn/shape_utils.hpp>
namespace cv
{
namespace dnn
{
class ScaleLayerImpl : public ScaleLayer
{
public:
ScaleLayerImpl(const LayerParams& params)
{
setParamsFrom(params);
hasBias = params.get<bool>("bias_term", false);
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
std::vector<MatShape> &internals) const
{
Layer::getMemoryShapes(inputs, requiredOutputs, outputs, internals);
return true;
}
virtual bool supportBackend(int backendId)
{
return backendId == DNN_BACKEND_DEFAULT ||
(backendId == DNN_BACKEND_HALIDE && haveHalide());
}
void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
{
CV_Assert(blobs.size() == 1 + hasBias);
for (size_t ii = 0; ii < outputs.size(); ii++)
{
Mat &inpBlob = *inputs[ii];
Mat &outBlob = outputs[ii];
CV_Assert(inpBlob.size[1] == blobs[0].total());
if (hasBias)
CV_Assert(inpBlob.size[1] == blobs[1].total());
CV_Assert(inpBlob.type() == CV_32F && outBlob.type() == CV_32F);
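// Apply the per-channel affine transform out = in*w + b plane by plane;
// convertTo(dst, CV_32F, alpha, beta) computes dst = src*alpha + beta.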
for( int cn = 0; cn < inpBlob.size[0]; cn++ )
{
for (int n = 0; n < inpBlob.size[1]; n++)
{
float w = blobs[0].at<float>(n);
float b = hasBias ? blobs[1].at<float>(n) : 0;
Mat outBlobPlane = getPlane(outBlob, cn, n);
Mat inpBlobPlane = getPlane(inpBlob, cn, n);
inpBlobPlane.convertTo(outBlobPlane, CV_32F, w, b);
}
}
}
}
virtual Ptr<BackendNode> tryAttach(const Ptr<BackendNode>& node)
{
switch (node->backendId)
{
case DNN_BACKEND_HALIDE:
{
#ifdef HAVE_HALIDE
auto base = node.dynamicCast<HalideBackendNode>();
Halide::Func& input = base->funcs.back();
Halide::Var x("x"), y("y"), c("c"), n("n");
Halide::Func top = attachHalide(input(x, y, c, n));
return Ptr<BackendNode>(new HalideBackendNode(base, top));
#endif // HAVE_HALIDE
break;
}
}
return Ptr<BackendNode>();
}
virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &inputs)
{
#ifdef HAVE_HALIDE
Halide::Buffer<float> input = halideBuffer(inputs[0]);
Halide::Var x("x"), y("y"), c("c"), n("n");
Halide::Func top = attachHalide(input(x, y, c, n));
return Ptr<BackendNode>(new HalideBackendNode(top));
#endif // HAVE_HALIDE
return Ptr<BackendNode>();
}
#ifdef HAVE_HALIDE
// attachHalide can work with both Halide::Buffer and Halide::Func. In the
// second case the result is a fusion with the preceding function.
Halide::Func attachHalide(const Halide::Expr& input)
{
Halide::Func top = (name.empty() ? Halide::Func() : Halide::Func(name));
Halide::Var x("x"), y("y"), c("c"), n("n");
const int numChannels = blobs[0].total();
auto weights = wrapToHalideBuffer(blobs[0], {numChannels});
Halide::Expr topExpr = input * weights(c);
if (hasBias)
{
auto bias = wrapToHalideBuffer(blobs[1], {numChannels});
topExpr += bias(c);
}
top(x, y, c, n) = topExpr;
return top;
}
#endif // HAVE_HALIDE
virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
const std::vector<MatShape> &outputs) const
{
(void)outputs; // suppress unused variable warning
int64 flops = 0;
for (size_t i = 0; i < inputs.size(); i++)
{
flops += 2*total(inputs[i]);
}
return flops;
}
};
Ptr<ScaleLayer> ScaleLayer::create(const LayerParams& params)
{
return Ptr<ScaleLayer>(new ScaleLayerImpl(params));
}
} // namespace dnn
} // namespace cv

View File

@ -0,0 +1,96 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
// Copyright (C) 2016, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
/*
Implementation of the Shift layer, which adds constant values to a blob.
*/
#include "../precomp.hpp"
#include <opencv2/dnn/shape_utils.hpp>
namespace cv
{
namespace dnn
{
class ShiftLayerImpl : public ShiftLayer
{
public:
ShiftLayerImpl(const LayerParams &params)
{
setParamsFrom(params);
CV_Assert(blobs.size() == 1);
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
std::vector<MatShape> &internals) const
{
Layer::getMemoryShapes(inputs, requiredOutputs, outputs, internals);
internals.assign(1, shape(1, total(inputs[0], 2)));
return true;
}
virtual void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
{
CV_Assert(inputs.size() > 0);
CV_Assert(blobs.size() > 0);
if(inputs[0]->dims == blobs[0].dims)
{
for (size_t ii = 0; ii < outputs.size(); ii++)
{
Mat &inpBlob = *inputs[ii];
Mat &outBlob = outputs[ii];
outBlob = inpBlob + blobs[0];
}
}
else
{
Mat biasOnesMat = internals[0];
biasOnesMat.setTo(1);
for (size_t ii = 0; ii < outputs.size(); ii++)
{
Mat &inpBlob = *inputs[ii];
Mat &outBlob = outputs[ii];
inpBlob.copyTo(outBlob);
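// Broadcast the per-channel bias over all spatial positions: blobs[0]
// (a column of biases) times the 1x(H*W) row of ones gives a matrix with
// bias[c] in every column, which gemm adds to dstMat in place.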
for (int n = 0; n < inpBlob.size[0]; n++)
{
Mat dstMat(inpBlob.size[1], inpBlob.size[2] * inpBlob.size[3],
outBlob.type(), outBlob.ptr(n));
gemm(blobs[0], biasOnesMat, 1, dstMat, 1, dstMat); //TODO: gemv
}
}
}
}
virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
const std::vector<MatShape> &outputs) const
{
(void)outputs; // suppress unused variable warning
int64 flops = 0;
for (size_t i = 0; i < inputs.size(); i++)
{
flops += total(inputs[i]);
}
return flops;
}
};
Ptr<ShiftLayer> ShiftLayer::create(const LayerParams& params)
{
return Ptr<ShiftLayer>(new ShiftLayerImpl(params));
}
}
}

View File

@ -0,0 +1,140 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "../precomp.hpp"
#include "layers_common.hpp"
#include <opencv2/dnn/shape_utils.hpp>
namespace cv
{
namespace dnn
{
class SliceLayerImpl : public SliceLayer
{
public:
SliceLayerImpl(const LayerParams& params)
{
setParamsFrom(params);
axis = params.get<int>("axis", 1);
if (params.has("slice_point"))
{
const DictValue &indicesValue = params.get("slice_point");
int i, n = indicesValue.size();
sliceIndices.resize(n);
for (i = 0; i < n; i++)
sliceIndices[i] = indicesValue.get<int>(i);
}
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
std::vector<MatShape> &internals) const
{
CV_Assert(inputs.size() == 1);
outputs.clear();
MatShape inpShape = inputs[0];
int cAxis = clamp(axis, inpShape.size());
int axisSize = inpShape[cAxis];
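// Example: axisSize = 10 with slice_point = {3, 7} yields output sizes
// {3, 4, 3} along the sliced axis.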
if (sliceIndices.size()) //divide blob with respect to passed parameters
{
std::vector<int> outAxisSize;
int prevSlice = 0;
for (size_t i = 0; i < sliceIndices.size(); i++)
{
if (!(prevSlice < sliceIndices[i] && sliceIndices[i] < axisSize))
CV_Error(Error::StsBadArg, "Slice indices should be positive, increased and don't exceed size of sliced dimension");
outAxisSize.push_back(sliceIndices[i] - prevSlice);
prevSlice = sliceIndices[i];
}
outAxisSize.push_back(axisSize - prevSlice);
for (size_t i = 0; i < outAxisSize.size(); i++)
{
inpShape[cAxis] = outAxisSize[i];
outputs.push_back(inpShape);
}
}
else //divide blob with respect to count of output blobs
{
CV_Assert(requiredOutputs > 0 && axisSize % requiredOutputs == 0);
int outAxisSize = axisSize / (int)requiredOutputs;
for (int i = 0; i < requiredOutputs; i++)
{
inpShape[cAxis] = outAxisSize;
outputs.push_back(inpShape);
}
}
return false;
}
void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
{
const Mat& inpMat = *inputs[0];
std::vector<Range> ranges(inpMat.dims, Range::all());
int cAxis = clamp(axis, inpMat.dims);
ranges[cAxis].start = 0;
for (size_t i = 0; i < outputs.size(); i++)
{
ranges[cAxis].end = ranges[cAxis].start + outputs[i].size[cAxis];
inpMat(&ranges[0]).copyTo(outputs[i]);
ranges[cAxis].start = ranges[cAxis].end;
}
}
};
Ptr<SliceLayer> SliceLayer::create(const LayerParams& params)
{
return Ptr<SliceLayer>(new SliceLayerImpl(params));
}
}
}

View File

@ -0,0 +1,213 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "../precomp.hpp"
#include "layers_common.hpp"
#include "op_halide.hpp"
#include <algorithm>
#include <stdlib.h>
using std::max;
namespace cv
{
namespace dnn
{
class SoftMaxLayerImpl : public SoftmaxLayer
{
public:
SoftMaxLayerImpl(const LayerParams& params)
{
axisRaw = params.get<int>("axis", 1);
logSoftMax = params.get<bool>("log_softmax", false);
setParamsFrom(params);
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
std::vector<MatShape> &internals) const
{
bool inplace = Layer::getMemoryShapes(inputs, requiredOutputs, outputs, internals);
MatShape shape = inputs[0];
int cAxis = clamp(axisRaw, shape.size());
shape[cAxis] = 1;
internals.assign(1, shape);
return inplace;
}
virtual bool supportBackend(int backendId)
{
return backendId == DNN_BACKEND_DEFAULT ||
(backendId == DNN_BACKEND_HALIDE && haveHalide() && axisRaw == 1);
}
void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
{
const Mat &src = *inputs[0];
Mat &dst = outputs[0];
int axis = clamp(axisRaw, src.dims);
size_t outerSize = src.total(0, axis), channels = src.size[axis],
innerSize = src.total(axis + 1);
CV_Assert(src.type() == CV_32F);
CV_Assert(src.isContinuous() && dst.isContinuous());
const float *srcPtr = src.ptr<float>();
float *dstPtr = dst.ptr<float>();
float *bufPtr = internals[0].ptr<float>();
size_t outerStep = src.total(axis);
size_t cnStep = src.total(axis + 1);
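// Softmax over the chosen axis in four passes: (1) per-position channel
// max, (2) subtract it for numerical stability, (3) exponentiate and sum
// over channels, (4) normalize (and take log when logSoftMax is set).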
//compute max along axis
for (size_t outerDim = 0; outerDim < outerSize; outerDim++)
{
size_t srcOffset = outerDim * outerStep;
size_t bufOffset = outerDim * cnStep;
memcpy(bufPtr + bufOffset, srcPtr + srcOffset, innerSize * sizeof(float));
for (size_t cnDim = 1; cnDim < channels; cnDim++)
{
for (size_t i = 0; i < innerSize; i++)
bufPtr[bufOffset + i] = std::max(bufPtr[bufOffset + i], srcPtr[srcOffset + cnDim * cnStep + i]);
}
}
//subtract max
for (size_t outerDim = 0; outerDim < outerSize; outerDim++)
{
size_t srcOffset = outerDim * outerStep;
size_t bufOffset = outerDim * cnStep;
for (size_t cnDim = 0; cnDim < channels; cnDim++)
{
for (size_t i = 0; i < innerSize; i++)
dstPtr[srcOffset + cnDim * cnStep + i] = srcPtr[srcOffset + cnDim * cnStep + i] - bufPtr[bufOffset + i];
}
}
cv::exp(dst, dst);
for (size_t outerDim = 0; outerDim < outerSize; outerDim++)
{
size_t srcOffset = outerDim * outerStep;
size_t bufOffset = outerDim * cnStep;
//sum exp along axis
for (size_t i = 0; i < innerSize; i++)
bufPtr[bufOffset + i] = 0.f;
for (size_t cnDim = 0; cnDim < channels; cnDim++)
{
for (size_t i = 0; i < innerSize; i++)
bufPtr[bufOffset + i] += dstPtr[srcOffset + cnDim * cnStep + i];
}
//divide by computed sum
for (size_t cnDim = 0; cnDim < channels; cnDim++)
{
for (size_t i = 0; i < innerSize; i++)
dstPtr[srcOffset + cnDim * cnStep + i] /= bufPtr[bufOffset + i];
}
if (logSoftMax)
{
for (size_t cnDim = 0; cnDim < channels; cnDim++)
{
for (size_t i = 0; i < innerSize; i++)
dstPtr[srcOffset + cnDim * cnStep + i] = log(dstPtr[srcOffset + cnDim * cnStep + i]);
}
}
}
}
virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &inputs)
{
#ifdef HAVE_HALIDE
Halide::Buffer<float> inputBuffer = halideBuffer(inputs[0]);
int inW, inH, inC, inN;
getCanonicalSize(inputBuffer, &inW, &inH, &inC, &inN);
if (inW != 1 || inH != 1)
CV_Error(cv::Error::StsNotImplemented,
"Halide backend for SoftMax with spatial size "
"more than 1x1 is not implemented");
Halide::Var x("x"), y("y"), c("c"), n("n");
Halide::Func top = (name.empty() ? Halide::Func() : Halide::Func(name));
Halide::Func expInput("expInput");
Halide::RDom r(0, inW, 0, inH, 0, inC);
expInput(x, y, c, n) = exp(inputBuffer(x, y, c, n));
Halide::Expr globalSum = sum(expInput(r.x, r.y, r.z, n));
top(x, y, c, n) = expInput(x, y, c, n) / globalSum;
return Ptr<BackendNode>(new HalideBackendNode(top));
#endif // HAVE_HALIDE
return Ptr<BackendNode>();
}
int64 getFLOPS(const std::vector<MatShape> &inputs,
const std::vector<MatShape> &outputs) const
{
(void)outputs; // suppress unused variable warning
int64 flops = 0;
for (size_t i = 0; i < inputs.size(); i++)
{
flops += 4*total(inputs[i]);
}
return flops;
}
int axisRaw;
};
Ptr<SoftmaxLayer> SoftmaxLayer::create(const LayerParams& params)
{
return Ptr<SoftmaxLayer>(new SoftMaxLayerImpl(params));
}
}
}

View File

@ -0,0 +1,97 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "../precomp.hpp"
#include "layers_common.hpp"
namespace cv
{
namespace dnn
{
class SplitLayerImpl : public SplitLayer
{
public:
SplitLayerImpl(const LayerParams &params)
{
setParamsFrom(params);
//TODO: maybe "top_count" param is useless because it can be determined by output connections number
if (params.has("top_count"))
{
outputsCount = params.get<int>("top_count");
CV_Assert(outputsCount >= 0);
}
else
{
outputsCount = -1;
}
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
std::vector<MatShape> &internals) const
{
CV_Assert(inputs.size() == 1);
Layer::getMemoryShapes(inputs, max(1, outputsCount >= 0 ? outputsCount : requiredOutputs),
outputs, internals);
return true;
}
void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
{
for (size_t i = 0; i < outputs.size(); i++)
{
CV_Assert(inputs[0]->total() == outputs[i].total());
if (outputs[i].data != inputs[0]->data)
inputs[0]->copyTo(outputs[i]);
}
}
};
Ptr<SplitLayer> SplitLayer::create(const LayerParams& params)
{
return Ptr<SplitLayer>(new SplitLayerImpl(params));
}
}
}

View File

@ -0,0 +1,206 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2017, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
#include "op_halide.hpp"
#ifdef HAVE_HALIDE
#include <HalideRuntimeOpenCL.h>
#endif // HAVE_HALIDE
namespace cv
{
namespace dnn
{
#ifdef HAVE_HALIDE
Halide::Buffer<float> wrapToHalideBuffer(const Mat& mat)
{
int n, c, w, h;
getCanonicalSize(mat.size, &w, &h, &c, &n);
return wrapToHalideBuffer(mat, {w, h, c, n});
}
Halide::Buffer<float> wrapToHalideBuffer(const Mat& mat,
const std::vector<int>& sizes)
{
Halide::Buffer<float> buffer((float*)mat.data, sizes);
buffer.set_host_dirty(); // Indicate that data is on CPU.
return buffer;
}
Halide::Buffer<> halideBuffer(const Ptr<BackendWrapper>& ptr)
{
CV_Assert(!ptr.empty());
return ptr.dynamicCast<HalideBackendWrapper>()->buffer;
}
std::vector<Halide::Buffer<> > halideBuffers(const std::vector<Ptr<BackendWrapper> >& ptrs)
{
std::vector<Halide::Buffer<> > vec;
vec.reserve(ptrs.size());
for (const Ptr<BackendWrapper>& ptr : ptrs)
{
vec.push_back(halideBuffer(ptr));
}
return vec;
}
void getCanonicalSize(const Halide::Buffer<>& buffer, int* width, int* height,
int* channels, int* batch)
{
CV_Assert(buffer.dimensions() == 4);
*width = buffer.extent(0);
*height = buffer.extent(1);
*channels = buffer.extent(2);
*batch = buffer.extent(3);
}
HalideBackendNode::HalideBackendNode(const Halide::Func& func)
: BackendNode(DNN_BACKEND_HALIDE), funcs(1, func) {}
HalideBackendNode::HalideBackendNode(const std::vector<Halide::Func>& funcs)
: BackendNode(DNN_BACKEND_HALIDE), funcs(funcs) {}
HalideBackendNode::HalideBackendNode(const Ptr<HalideBackendNode>& base,
const Halide::Func& top)
: BackendNode(DNN_BACKEND_HALIDE), funcs(base->funcs)
{
funcs.back() = top;
}
HalideBackendWrapper::HalideBackendWrapper(int targetId, const cv::Mat& m)
: BackendWrapper(DNN_BACKEND_HALIDE, targetId)
{
buffer = wrapToHalideBuffer(m);
if (targetId == DNN_TARGET_CPU)
{
return;
}
else if (targetId == DNN_TARGET_OPENCL)
{
buffer.copy_to_device(halide_opencl_device_interface());
}
else
CV_Error(Error::StsNotImplemented, "Unknown target identifier");
}
HalideBackendWrapper::HalideBackendWrapper(const Ptr<BackendWrapper>& base,
const MatShape& shape)
: BackendWrapper(DNN_BACKEND_HALIDE, base->targetId)
{
int w, h, c, n;
getCanonicalSize(shape, &w, &h, &c, &n);
Halide::Buffer<float> baseBuffer = halideBuffer(base);
buffer = Halide::Buffer<float>((float*)baseBuffer.raw_buffer()->host,
{w, h, c, n});
if (baseBuffer.has_device_allocation())
{
buffer.raw_buffer()->device = baseBuffer.raw_buffer()->device;
buffer.raw_buffer()->device_interface = baseBuffer.raw_buffer()->device_interface;
buffer.set_device_dirty();
}
else
{
buffer.set_host_dirty(); // Indicate that data is on CPU.
CV_Assert(targetId == DNN_TARGET_CPU);
}
}
void HalideBackendWrapper::copyToHost()
{
CV_Assert(targetId == DNN_TARGET_CPU || buffer.device_dirty());
if (buffer.device_dirty())
{
buffer.device_sync();
buffer.copy_to_host();
}
}
#endif // HAVE_HALIDE
void getCanonicalSize(const MatSize& size, int* width, int* height,
int* channels, int* batch)
{
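// cv::MatSize stores the dimension count immediately before the sizes
// array, so p[-1] is Mat::dims.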
const int dims = size.p[-1];
CV_Assert(dims == 2 || dims == 4);
*batch = size[0];
*channels = size[1];
if (dims == 4)
{
*width = size[3];
*height = size[2];
}
else
{
*width = 1;
*height = 1;
}
}
void getCanonicalSize(const MatShape& shape, int* width, int* height,
int* channels, int* batch)
{
const int dims = (int)shape.size();
CV_Assert(dims == 2 || dims == 4);
*batch = shape[0];
*channels = shape[1];
if (dims == 4)
{
*width = shape[3];
*height = shape[2];
}
else
{
*width = 1;
*height = 1;
}
}
void compileHalide(std::vector<Mat> &outputs, Ptr<BackendNode>& node, int targetId)
{
#ifdef HAVE_HALIDE
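// Pin every output dimension to the known blob extents so Halide can
// specialize the compiled pipeline; the NoAsserts feature additionally
// drops runtime bounds checks.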
CV_Assert(!node.empty());
Halide::Func& top = node.dynamicCast<HalideBackendNode>()->funcs.back();
int outW, outH, outC, outN;
Halide::Var x("x"), y("y"), c("c"), n("n");
getCanonicalSize(outputs[0].size, &outW, &outH, &outC, &outN);
top.bound(x, 0, outW).bound(y, 0, outH)
.bound(c, 0, outC).bound(n, 0, outN);
Halide::Target target = Halide::get_host_target();
target.set_feature(Halide::Target::NoAsserts);
if (targetId == DNN_TARGET_OPENCL)
{
target.set_feature(Halide::Target::OpenCL);
}
CV_Assert(target.supported());
top.compile_jit(target);
#endif // HAVE_HALIDE
}
void forwardHalide(std::vector<Ptr<BackendWrapper> > &outputs,
const Ptr<BackendNode>& node)
{
#ifdef HAVE_HALIDE
CV_Assert(!node.empty());
Halide::Func& top = node.dynamicCast<HalideBackendNode>()->funcs.back();
auto outputBuffers = halideBuffers(outputs);
top.realize(Halide::Realization(outputBuffers));
#endif // HAVE_HALIDE
}
bool haveHalide()
{
#ifdef HAVE_HALIDE
return true;
#else
return false;
#endif // HAVE_HALIDE
}
} // namespace dnn
} // namespace cv

View File

@ -0,0 +1,84 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2017, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
#ifndef __OPENCV_DNN_OP_HALIDE_HPP__
#define __OPENCV_DNN_OP_HALIDE_HPP__
#include "precomp.hpp"
#ifdef HAVE_HALIDE
#include <Halide.h>
#endif // HAVE_HALIDE
namespace cv
{
namespace dnn
{
#ifdef HAVE_HALIDE
// Returns a four-dimensional float32 buffer that wraps cv::Mat data.
// No data copy is performed.
Halide::Buffer<float> wrapToHalideBuffer(const Mat& mat);
Halide::Buffer<float> wrapToHalideBuffer(const Mat& mat,
const std::vector<int>& shape);
// Extract batch size, number of channels, width and height from buffer.
void getCanonicalSize(const Halide::Buffer<>& buffer, int* width, int* height,
int* channels, int* batch);
// Casts the pointer and creates a shallow copy of the Halide buffer. No data copy.
Halide::Buffer<> halideBuffer(const Ptr<BackendWrapper>& ptr);
std::vector<Halide::Buffer<> > halideBuffers(const std::vector<Ptr<BackendWrapper> >& ptrs);
class HalideBackendNode : public BackendNode
{
public:
HalideBackendNode(const Halide::Func& func);
HalideBackendNode(const std::vector<Halide::Func>& funcs);
// Initialize from the <base> node but replace the last function with <top>.
// It's used for layer fusion, when we want to keep the functions of the
// root layer but replace the top one with a fused function (i.e. conv+padding to relu+padding).
HalideBackendNode(const Ptr<HalideBackendNode>& base, const Halide::Func& top);
std::vector<Halide::Func> funcs;
};
class HalideBackendWrapper : public BackendWrapper
{
public:
HalideBackendWrapper(int targetId, const cv::Mat& m);
HalideBackendWrapper(const Ptr<BackendWrapper>& base, const MatShape& shape);
virtual void copyToHost();
Halide::Buffer<float> buffer;
};
#endif // HAVE_HALIDE
// Extract batch size, number of channels, width and height from MatSize.
void getCanonicalSize(const MatSize& size, int* width, int* height,
int* channels, int* batch);
void getCanonicalSize(const MatShape& shape, int* width, int* height,
int* channels, int* batch);
// Realize Halide pipeline into output blobs.
void forwardHalide(std::vector<Ptr<BackendWrapper> > &outputs,
const Ptr<BackendNode>& node);
// Compile Halide pipeline to specific target. Use outputs to set bounds of functions.
void compileHalide(std::vector<Mat> &outputs, Ptr<BackendNode>& node, int targetId);
bool haveHalide();
} // namespace dnn
} // namespace cv
#endif // __OPENCV_DNN_OP_HALIDE_HPP__

View File

@ -0,0 +1,44 @@
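// Note: the element type T and the RELU_NO_SLOPE switch are expected to be
// supplied as OpenCL build options (e.g. -DT=float) when this program is
// compiled.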
__kernel void ReLUForward(const int count, __global const T* in, __global T* out
#ifndef RELU_NO_SLOPE
, T negative_slope
#endif
) {
int index = get_global_id(0);
if(index < count)
#ifndef RELU_NO_SLOPE
out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;
#else
out[index] = in[index] > 0 ? in[index] : 0;
#endif
}
__kernel void TanHForward(const int count, __global const T* in, __global T* out) {
int index = get_global_id(0);
if(index < count)
out[index] = tanh(in[index]);
}
__kernel void SigmoidForward(const int count, __global const T* in, __global T* out) {
int index = get_global_id(0);
if(index < count)
out[index] = 1. / (1. + exp(-in[index]));
}
__kernel void BNLLForward(const int n, __global const T* in, __global T* out) {
int index = get_global_id(0);
if (index < n) {
out[index] = in[index] > 0 ? in[index] + log(1. + exp(-in[index])) : log(1. + exp(in[index]));
}
}
__kernel void AbsValForward(const int n, __global const T* in, __global T* out) {
int index = get_global_id(0);
if (index < n)
out[index] = fabs(in[index]);
}
__kernel void PowForward(const int n, __global const T* in, __global T* out, const T power, const T scale, const T shift) {
int index = get_global_id(0);
if (index < n)
out[index] = pow(shift + scale * in[index], power);
}

View File

@ -0,0 +1,62 @@
/*************************************************************************************
* Copyright (c) 2015, Advanced Micro Devices, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation and/or
* other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
* OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
**************************************************************************************/
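// col2im is the inverse scatter of im2col: each work item computes one
// image element by summing every unrolled-column entry to which the
// corresponding input pixel contributed during im2col.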
__kernel void col2im(const int n, __global const T* data_col, const int col_offset,
const int height, const int width, const int channels,
const int patch_h, const int patch_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int height_col, const int width_col,
__global T* data_im, const int img_offset)
{
data_col = data_col + col_offset;
data_im = data_im + img_offset;
int index = get_global_id(0);
if(index < n) {
T val = 0;
int w = index % width + pad_w;
int h = (index / width) % height + pad_h;
int c = index / (width * height);
// compute the start and end of the output
int w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1;
int w_col_end = min(w / stride_w + 1, width_col);
int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1;
int h_col_end = min(h / stride_h + 1, height_col);
// equivalent implementation
int offset =
(c * patch_h * patch_w + h * patch_w + w) * height_col * width_col;
int coeff_h_col = (1 - stride_h * patch_w * height_col) * width_col;
int coeff_w_col = (1 - stride_w * height_col * width_col);
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col];
}
}
data_im[index] = val;
}
}

View File

@ -0,0 +1,71 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
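// im2col unrolls kernel_h x kernel_w input patches into columns so that a
// convolution becomes a single matrix multiplication. Each work item fills
// the kernel_h*kernel_w column entries for one (channel, output position)
// pair; taps that fall into the padding are written as zeros.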
__kernel void im2col(__global const T *im_src, int im_src_offset,
int channels, int height_inp, int width_inp,
int kernel_h, int kernel_w, int pad_h, int pad_w, int stride_h, int stride_w,
int height_out, int width_out,
__global T *im_col, int im_col_offset
)
{
int index = get_global_id(0);
if (index >= height_out * width_out * channels)
return;
int j_out = index % width_out;
int i_out = (index / width_out) % height_out;
int c_inp = (index / width_out) / height_out;
int c_out = c_inp * kernel_h * kernel_w;
int i_inp = i_out * stride_h - pad_h;
int j_inp = j_out * stride_w - pad_w;
im_src += (c_inp * height_inp + i_inp) * width_inp + j_inp + im_src_offset;
im_col += (c_out * height_out + i_out) * width_out + j_out + im_col_offset;
for (int ki = 0; ki < kernel_h; ++ki)
for (int kj = 0; kj < kernel_w; ++kj) {
int i = i_inp + ki;
int j = j_inp + kj;
*im_col = (i >= 0 && j >= 0 && i < height_inp && j < width_inp) ?
im_src[ki * width_inp + kj] : 0;
im_col += height_out * width_out;
}
}

View File

@ -0,0 +1,76 @@
/*************************************************************************************
* Copyright (c) 2015, Advanced Micro Devices, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation and/or
* other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
* OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
**************************************************************************************/
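// Local Response Normalization across channels: LRNFillScale computes
// scale[i] = k + (alpha/size) * sum of squared inputs over a sliding window
// of `size` channels, and LRNComputeOutput applies out = in * pow(scale, -beta).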
__kernel void LRNComputeOutput(const int nthreads, __global T* in, __global T* scale, const T negative_beta, __global T* out) {
int index = get_global_id(0);
int tmp = get_global_size(0);
for (; index < nthreads; index += tmp)
out[index] = in[index] * pow(scale[index], negative_beta);
}
__kernel void LRNFillScale(const int nthreads, __global T* in, const int num, const int channels, const int height, const int width, const int size, const T alpha_over_size, const T k, __global T* scale) {
int index = get_global_id(0);
int tmp = get_global_size(0);
for (; index < nthreads; index += tmp) {
// find out the local offset
const int w = index % width;
const int h = (index / width) % height;
const int n = index / width / height;
const int offset = (n * channels * height + h) * width + w;
const int step = height * width;
in = in + offset;
scale = scale + offset;
int head = 0;
const int pre_pad = (size - 1) / 2;
const int post_pad = size - pre_pad - 1;
T accum_scale = 0;
// fill the scale at [n, :, h, w]
// accumulate values
while (head < post_pad && head < channels) {
accum_scale += in[head * step] * in[head * step];
++head;
}
// both add and subtract
while (head < channels) {
accum_scale += in[head * step] * in[head * step];
if (head - size >= 0) {
accum_scale -= in[(head - size) * step]
* in[(head - size) * step];
}
scale[(head - post_pad) * step] = k + accum_scale * alpha_over_size;
++head;
}
// subtract only
while (head < channels + post_pad) {
if (head - size >= 0) {
accum_scale -= in[(head - size) * step]
* in[(head - size) * step];
}
scale[(head - post_pad) * step] = k + accum_scale * alpha_over_size;
++head;
}
}
}

View File

@ -0,0 +1,106 @@
/*************************************************************************************
* Copyright (c) 2015, Advanced Micro Devices, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation and/or
* other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
* OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
**************************************************************************************/
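// Pooling kernels: each work item strides over the output elements; for
// every pooled cell it scans the corresponding kernel_h x kernel_w input
// window, taking the maximum (optionally recording its index in `mask`)
// or the average over the (padded) window.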
__kernel void MaxPoolForward(const int nthreads,
__global T* bottom_data, const int num, const int channels, const int height, const int width,
const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
__global T* top_data
#ifdef MASK
, __global float* mask
#endif
)
{
int index = get_global_id(0);
int tmp = get_global_size(0);
for (; index < nthreads; index += tmp) {
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
const int hend = min(hstart + kernel_h, height);
const int wend = min(wstart + kernel_w, width);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
T maxval = -FLT_MAX;
int maxidx = -1;
bottom_data =
bottom_data + (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
if (bottom_data[h * width + w] > maxval) {
maxidx = h * width + w;
maxval = bottom_data[maxidx];
}
}
}
top_data[index] = maxval;
#ifdef MASK
mask[index] = maxidx;
#endif
}
}
__kernel void AvePoolForward(const int nthreads,
__global T* bottom_data, const int num, const int channels, const int height, const int width,
const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
__global T* top_data
#ifdef MASK
, __global float* mask // NOT USED
#endif
)
{
int index = get_global_id(0);
int tmp = get_global_size(0);
for (; index < nthreads; index += tmp) {
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, height + pad_h);
int wend = min(wstart + kernel_w, width + pad_w);
const int pool_size = (hend - hstart) * (wend - wstart);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
hend = min(hend, height);
wend = min(wend, width);
T aveval = 0;
bottom_data =
bottom_data + (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
aveval += bottom_data[h * width + w];
}
}
top_data[index] = aveval / pool_size;
}
}

View File

@ -0,0 +1,75 @@
/*************************************************************************************
* Copyright (c) 2015, Advanced Micro Devices, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation and/or
* other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
* OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
**************************************************************************************/
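// Channel-wise softmax building blocks: compute the per-position maximum,
// subtract it, sum over channels, and divide. The exponentiation step is
// expected to run between the subtract and sum kernels (on the host or in
// a separate kernel).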
__kernel void kernel_channel_max(const int num, const int channels,
const int spatial_dim, __global const T* data, __global T* out) {
int index = get_global_id(0);
if(index < num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
T maxval = -FLT_MAX;
for (int c = 0; c < channels; ++c) {
maxval = max(data[(n * channels + c) * spatial_dim + s], maxval);
}
out[index] = maxval;
}
}
__kernel void kernel_channel_subtract(const int count,
const int num, const int channels,
const int spatial_dim, __global const T* channel_max, __global T* data) {
int index = get_global_id(0);
if(index < count) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
data[index] -= channel_max[n * spatial_dim + s];
}
}
__kernel void kernel_channel_sum(const int num, const int channels,
const int spatial_dim, __global const T* data, __global T* channel_sum) {
int index = get_global_id(0);
if(index < num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
T sum = 0;
for (int c = 0; c < channels; ++c) {
sum += data[(n * channels + c) * spatial_dim + s];
}
channel_sum[index] = sum;
}
}
__kernel void kernel_channel_div(const int count,
const int num, const int channels,
const int spatial_dim, __global const T* channel_sum, __global T* data) {
int index = get_global_id(0);
if(index < count) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
data[index] /= channel_sum[n * spatial_dim + s];
}
}

View File

@ -0,0 +1,45 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include <opencv2/core.hpp>
#include "cvconfig.h"
#include <opencv2/dnn.hpp>
#include <opencv2/dnn/all_layers.hpp>

View File

@ -0,0 +1,60 @@
syntax = "proto3";
package tensorflow;
option cc_enable_arenas = true;
option java_outer_classname = "AttrValueProtos";
option java_multiple_files = true;
option java_package = "org.tensorflow.framework";
import "tensor.proto";
import "tensor_shape.proto";
import "types.proto";
// Protocol buffer representing the value for an attr used to configure an Op.
// Comment indicates the corresponding attr type. Only the field matching the
// attr type may be filled.
message AttrValue {
message ListValue {
repeated bytes s = 2; // "list(string)"
repeated int64 i = 3 [packed = true]; // "list(int)"
repeated float f = 4 [packed = true]; // "list(float)"
repeated bool b = 5 [packed = true]; // "list(bool)"
repeated DataType type = 6 [packed = true]; // "list(type)"
repeated TensorShapeProto shape = 7; // "list(shape)"
repeated TensorProto tensor = 8; // "list(tensor)"
// TODO(zhifengc/josh11b): implements list(func) if needed.
}
oneof value {
bytes s = 2; // "string"
int64 i = 3; // "int"
float f = 4; // "float"
bool b = 5; // "bool"
DataType type = 6; // "type"
TensorShapeProto shape = 7; // "shape"
TensorProto tensor = 8; // "tensor"
ListValue list = 1; // any "list(...)"
// "func" represents a function. func.name is a function's name or
// a primitive op's name. func.attr.first is the name of an attr
// defined for that function. func.attr.second is the value for
// that attr in the instantiation.
NameAttrList func = 10;
// This is a placeholder only used in nodes defined inside a
// function. It indicates the attr value will be supplied when
// the function is instantiated. For example, let us suppose a
// node "N" in function "FN". "N" has an attr "A" with value
// placeholder = "foo". When FN is instantiated with attr "foo"
// set to "bar", the instantiated node N's attr A will have been
// given the value "bar".
string placeholder = 9;
}
}
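// Example (text format, illustrative): an int-valued AttrValue is "i: 3";
// a list of ints is "list { i: 1 i: 2 }".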
// A list of attr names and their values. The whole list is attached
// with a string name. E.g., MatMul[T=float].
message NameAttrList {
string name = 1;
map<string, AttrValue> attr = 2;
}

View File

@ -0,0 +1,95 @@
syntax = "proto3";
package tensorflow;
option cc_enable_arenas = true;
option java_outer_classname = "FunctionProtos";
option java_multiple_files = true;
option java_package = "org.tensorflow.framework";
import "attr_value.proto";
import "op_def.proto";
// A library is a set of named functions.
message FunctionDefLibrary {
repeated FunctionDef function = 1;
repeated GradientDef gradient = 2;
}
// A function can be instantiated when the runtime can bind every attr
// with a value. When a GraphDef has a call to a function, it must
// have binding for every attr defined in the signature.
//
// TODO(zhifengc):
// * device spec, etc.
message FunctionDef {
// The definition of the function's name, arguments, return values,
// attrs etc.
OpDef signature = 1;
// The body of the function.
repeated Node node = 2; // function.node.ret[*] are unique.
// A node is a multi-value assignment:
// (ret[0], ret[1], ...) = func(arg[0], arg[1], ...)
//
// By convention, "func" is resolved by consulting with a user-defined
// library first. If not resolved, "func" is assumed to be a builtin op.
message Node {
// This node produces multiple outputs. They are named ret[0],
// ret[1], ..., etc.
//
// REQUIRES: function.node.ret[*] are unique across all nodes.
// REQUIRES: ret.size == func/op def's number of output args.
repeated string ret = 1;
// The op/function name.
string op = 2;
// Arguments passed to this func/op.
//
// arg[i] must be either one of
// function.signature.input_args[*].name or one of
// function.node[*].ret[*].
//
// REQUIRES: arg.size == func/op def's number of input args.
repeated string arg = 3;
// Control dependencies.
//
// dep[i] must be one of function.node[*].ret[*] or one of
// function.signature.input_args[*].name.
repeated string dep = 4;
// Attrs.
//
// 'attr' maps names defined by 'func's attr defs to attr values.
// attr values may have placeholders which are substituted
// recursively by concrete values when this node is instantiated.
// These placeholders must name an attr listed in the FunctionDef's
// signature.
map<string, AttrValue> attr = 5;
}
}
// GradientDef defines the gradient function of a function defined in
// a function library.
//
// A gradient function g (specified by gradient_func) for a function f
// (specified by function_name) must follow the following:
//
// The function 'f' must be a numerical function which takes N inputs
// and produces M outputs. Its gradient function 'g', which is a
// function taking N + M inputs and produces N outputs.
//
// I.e. if we have
// (y1, y2, ..., y_M) = f(x1, x2, ..., x_N),
// then, g is
// (dL/dx1, dL/dx2, ..., dL/dx_N) = g(x1, x2, ..., x_N,
// dL/dy1, dL/dy2, ..., dL/dy_M),
// where L is a scalar-value function of (x1, x2, ..., xN) (e.g., the
// loss function). dL/dx_i is the partial derivative of L with respect
// to x_i.
message GradientDef {
string function_name = 1; // The function name.
string gradient_func = 2; // The gradient function's name.
}

View File

@ -0,0 +1,112 @@
syntax = "proto3";
package tensorflow;
option cc_enable_arenas = true;
option java_outer_classname = "GraphProtos";
option java_multiple_files = true;
option java_package = "org.tensorflow.framework";
import "attr_value.proto";
import "function.proto";
import "versions.proto";
// Represents the graph of operations
message GraphDef {
repeated NodeDef node = 1;
// Compatibility versions of the graph. See core/public/version.h for version
// history. The GraphDef version is distinct from the TensorFlow version, and
// each release of TensorFlow will support a range of GraphDef versions.
VersionDef versions = 4;
// Deprecated single version field; use versions above instead. Since all
// GraphDef changes before "versions" was introduced were forward
// compatible, this field is entirely ignored.
int32 version = 3 [deprecated = true];
// EXPERIMENTAL. DO NOT USE OR DEPEND ON THIS YET.
//
// "library" provides user-defined functions.
//
// Naming:
// * library.function.name are in a flat namespace.
// NOTE: We may need to change it to be hierarchical to support
// different orgs. E.g.,
// { "/google/nn", { ... }},
// { "/google/vision", { ... }}
// { "/org_foo/module_bar", {...}}
// map<string, FunctionDefLib> named_lib;
// * If node[i].op is the name of one function in "library",
// node[i] is deemed as a function call. Otherwise, node[i].op
// must be a primitive operation supported by the runtime.
//
//
// Function call semantics:
//
// * The callee may start execution as soon as some of its inputs
// are ready. The caller may want to use Tuple() mechanism to
// ensure all inputs are ready in the same time.
//
// * The consumer of return values may start executing as soon as
// the return values the consumer depends on are ready. The
// consumer may want to use Tuple() mechanism to ensure the
// consumer does not start until all return values of the callee
// function are ready.
FunctionDefLibrary library = 2;
};
message NodeDef {
// The name given to this operator. Used for naming inputs,
// logging, visualization, etc. Unique within a single GraphDef.
// Must match the regexp "[A-Za-z0-9.][A-Za-z0-9_./]*".
string name = 1;
// The operation name. There may be custom parameters in attrs.
// Op names starting with an underscore are reserved for internal use.
string op = 2;
// Each input is "node:src_output" with "node" being a string name and
// "src_output" indicating which output tensor to use from "node". If
// "src_output" is 0 the ":0" suffix can be omitted. Regular inputs
// may optionally be followed by control inputs that have the format
// "^node".
repeated string input = 3;
// A (possibly partial) specification for the device on which this
// node should be placed.
// The expected syntax for this string is as follows:
//
// DEVICE_SPEC ::= COLOCATED_NODE | PARTIAL_SPEC
//
// COLOCATED_NODE ::= "@" NODE_NAME // See NodeDef.name above.
// PARTIAL_SPEC ::= ("/" CONSTRAINT) *
// CONSTRAINT ::= ("job:" JOB_NAME)
// | ("replica:" [1-9][0-9]*)
// | ("task:" [1-9][0-9]*)
// | ( ("gpu" | "cpu") ":" ([1-9][0-9]* | "*") )
//
// Valid values for this string include:
// * "@other/node" (colocate with "other/node")
// * "/job:worker/replica:0/task:1/gpu:3" (full specification)
// * "/job:worker/gpu:3" (partial specification)
// * "" (no specification)
//
// If the constraints do not resolve to a single device (or if this
// field is empty or not present), the runtime will attempt to
// choose a device automatically.
string device = 4;
// Operation-specific graph-construction-time configuration.
// Note that this should include all attrs defined in the
// corresponding OpDef, including those with a value matching
// the default -- this allows the default to change and makes
// NodeDefs easier to interpret on their own. However, if
// an attr with a default is not specified in this list, the
// default will be used.
// The "names" (keys) must match the regexp "[a-z][a-z0-9_]+" (and
// one of the names from the corresponding OpDef's attr field).
// The values must have a type matching the corresponding OpDef
// attr's type field.
// TODO(josh11b): Add some examples here showing best practices.
map<string, AttrValue> attr = 5;
};
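The input and device conventions above can be seen in a short sketch built with the protoc-generated C++ classes; the node names and the "T" attr key are hypothetical for this example:

#include "attr_value.pb.h"  // for tensorflow::AttrValue
#include "graph.pb.h"       // generated by protoc from this file

int main() {
  tensorflow::GraphDef graph;

  tensorflow::NodeDef* a = graph.add_node();
  a->set_name("const_a");
  a->set_op("Const");

  tensorflow::NodeDef* mul = graph.add_node();
  mul->set_name("mul");
  mul->set_op("Mul");
  mul->add_input("const_a");    // output 0 of "const_a"; ":0" omitted
  mul->add_input("const_a:0");  // the same output, spelled explicitly
  mul->add_input("^const_a");   // control input, after regular inputs
  mul->set_device("/job:worker/gpu:3");  // partial device specification

  // The attr map carries the graph-construction-time configuration.
  tensorflow::AttrValue t;
  t.set_type(tensorflow::DT_FLOAT);
  (*mul->mutable_attr())["T"] = t;
  return 0;
}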

View File

@ -0,0 +1,157 @@
syntax = "proto3";
package tensorflow;
option cc_enable_arenas = true;
option java_outer_classname = "OpDefProtos";
option java_multiple_files = true;
option java_package = "org.tensorflow.framework";
import "attr_value.proto";
import "types.proto";
// Defines an operation. A NodeDef in a GraphDef specifies an Op by
// using the "op" field which should match the name of an OpDef.
message OpDef {
// Op names starting with an underscore are reserved for internal use.
// Names should be CamelCase and match the regexp "[A-Z][a-zA-Z0-9_]*".
string name = 1;
// For describing inputs and outputs.
message ArgDef {
// Name for the input/output. Should match the regexp "[a-z][a-z0-9_]*".
string name = 1;
// Human readable description.
string description = 2;
// Describes the type of one or more tensors that are accepted/produced
// by this input/output arg. The only legal combinations are:
// * For a single tensor: either the "type" field is set or the
// "type_attr" field is set to the name of an attr with type "type".
// * For a sequence of tensors with the same type: the "number_attr"
// field will be set to the name of an attr with type "int", and
// either the "type" or "type_attr" field will be set as for
// single tensors.
// * For a sequence of tensors, the "type_list_attr" field will be set
// to the name of an attr with type "list(type)".
DataType type = 3;
string type_attr = 4; // if specified, attr must have type "type"
string number_attr = 5; // if specified, attr must have type "int"
// If specified, attr must have type "list(type)", and none of
// type, type_attr, and number_attr may be specified.
string type_list_attr = 6;
// For inputs: if true, the inputs are required to be refs.
// By default, inputs can be either refs or non-refs.
// For outputs: if true, outputs are refs, otherwise they are not.
bool is_ref = 16;
};
// Description of the input(s).
repeated ArgDef input_arg = 2;
// Description of the output(s).
repeated ArgDef output_arg = 3;
// Description of the graph-construction-time configuration of this
// Op. That is to say, this describes the attr fields that will
// be specified in the NodeDef.
message AttrDef {
// A descriptive name for the argument. May be used, e.g. by the
// Python client, as a keyword argument name, and so should match
// the regexp "[a-z][a-z0-9_]+".
string name = 1;
// One of the type names from attr_value.proto ("string", "list(string)",
// "int", etc.).
string type = 2;
// A reasonable default for this attribute if the user does not supply
// a value. If not specified, the user must supply a value.
AttrValue default_value = 3;
// Human-readable description.
string description = 4;
// TODO(josh11b): bool is_optional?
// --- Constraints ---
// These constraints are only in effect if specified. Default is no
// constraints.
// For type == "int", this is a minimum value. For "list(___)"
// types, this is the minimum length.
bool has_minimum = 5;
int64 minimum = 6;
// The set of allowed values. Has type that is the "list" version
// of the "type" field above (uses the "list" field of AttrValue).
// If type == "type" or "list(type)" above, then the "type" field
// of "allowed_values.list" has the set of allowed DataTypes.
// If type == "string" or "list(string)", then the "s" field of
// "allowed_values.list" has the set of allowed strings.
AttrValue allowed_values = 7;
}
repeated AttrDef attr = 4;
// Optional deprecation based on GraphDef versions.
OpDeprecation deprecation = 8;
// One-line human-readable description of what the Op does.
string summary = 5;
// Additional, longer human-readable description of what the Op does.
string description = 6;
// -------------------------------------------------------------------------
// Which optimizations this operation can participate in.
// True if the operation is commutative ("op(a,b) == op(b,a)" for all inputs)
bool is_commutative = 18;
// If is_aggregate is true, then this operation accepts N >= 2
// inputs and produces 1 output all of the same type. Should be
// associative and commutative, and produce output with the same
// shape as the input. The optimizer may replace an aggregate op
// taking input from multiple devices with a tree of aggregate ops
// that aggregate locally within each device (and possibly within
// groups of nearby devices) before communicating.
// TODO(josh11b): Implement that optimization.
bool is_aggregate = 16; // for things like add
// Other optimizations go here, like
// can_alias_input, rewrite_when_output_unused, partitioning_strategy, etc.
// -------------------------------------------------------------------------
// Optimization constraints.
// By default Ops may be moved between devices. Stateful ops should
// either not be moved, or should only be moved if that state can also
// be moved (e.g. via some sort of save / restore).
// Stateful ops are guaranteed to never be optimized away by Common
// Subexpression Elimination (CSE).
bool is_stateful = 17; // for things like variables, queue
// -------------------------------------------------------------------------
// Non-standard options.
// By default, all inputs to an Op must be initialized Tensors. Ops
// that may initialize tensors for the first time should set this
// field to true, to allow the Op to take an uninitialized Tensor as
// input.
bool allows_uninitialized_input = 19; // for Assign, etc.
};
// Information about version-dependent deprecation of an op
message OpDeprecation {
// First GraphDef version at which the op is disallowed.
int32 version = 1;
// Explanation of why it was deprecated and what to use instead.
string explanation = 2;
};
// A collection of OpDefs
message OpList {
repeated OpDef op = 1;
};
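Putting the pieces together, here is a minimal sketch of an OpDef for a hypothetical commutative binary op whose input and output types are tied to a single "T" attr, using the protoc-generated C++ classes:

#include "op_def.pb.h"  // generated by protoc from this file

int main() {
  tensorflow::OpDef op;
  op.set_name("MyAdd");  // hypothetical op name
  op.set_summary("Returns x + y element-wise.");
  op.set_is_commutative(true);

  tensorflow::OpDef::ArgDef* x = op.add_input_arg();
  x->set_name("x");
  x->set_type_attr("T");  // type supplied by the "T" attr below

  tensorflow::OpDef::ArgDef* y = op.add_input_arg();
  y->set_name("y");
  y->set_type_attr("T");

  tensorflow::OpDef::ArgDef* z = op.add_output_arg();
  z->set_name("z");
  z->set_type_attr("T");

  tensorflow::OpDef::AttrDef* t = op.add_attr();
  t->set_name("T");
  t->set_type("type");
  // allowed_values uses the "list" version of the attr type: the
  // "type" list holds the permitted DataTypes.
  t->mutable_allowed_values()->mutable_list()->add_type(tensorflow::DT_FLOAT);
  t->mutable_allowed_values()->mutable_list()->add_type(tensorflow::DT_DOUBLE);
  return 0;
}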

View File

@ -0,0 +1,68 @@
syntax = "proto3";
package tensorflow;
option cc_enable_arenas = true;
option java_outer_classname = "TensorProtos";
option java_multiple_files = true;
option java_package = "org.tensorflow.framework";
import "tensor_shape.proto";
import "types.proto";
// Protocol buffer representing a tensor.
message TensorProto {
DataType dtype = 1;
// Shape of the tensor. TODO(touts): sort out the 0-rank issues.
TensorShapeProto tensor_shape = 2;
// Only one of the representations below is set, either "tensor_content"
// or one of the "xxx_val" attributes. We are not using oneof because,
// as oneofs cannot contain repeated fields, it would require an extra
// set of messages.
// Version number.
//
// In version 0, if the "repeated xxx" representations contain only one
// element, that element is repeated to fill the shape. This makes it easy
// to represent a constant Tensor with a single value.
int32 version_number = 3;
// Serialized content from Tensor::AsProtoTensorContent(). This representation
// can be used for all tensor types.
bytes tensor_content = 4;
// Type specific representations that make it easy to create tensor protos in
// all languages. Only the representation corresponding to "dtype" can
// be set. The values hold the flattened representation of the tensor in
// row major order.
// DT_HALF. Note that since protobuf has no int16 type, we'll have some
// pointless zero padding for each value here.
repeated int32 half_val = 13 [packed = true];
// DT_FLOAT.
repeated float float_val = 5 [packed = true];
// DT_DOUBLE.
repeated double double_val = 6 [packed = true];
// DT_INT32, DT_INT16, DT_INT8, DT_UINT8.
repeated int32 int_val = 7 [packed = true];
// DT_STRING
repeated bytes string_val = 8;
// DT_COMPLEX64. scomplex_val(2*i) and scomplex_val(2*i+1) are the real
// and imaginary parts of the i-th single-precision complex number.
repeated float scomplex_val = 9 [packed = true];
// DT_INT64
repeated int64 int64_val = 10 [packed = true];
// DT_BOOL
repeated bool bool_val = 11 [packed = true];
// DT_COMPLEX128. dcomplex_val(2*i) and dcomplex_val(2*i+1) are the real
// and imaginary parts of the i-th double-precision complex number.
repeated double dcomplex_val = 12 [packed = true];
};
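A minimal sketch of the version-0 "fill" convention described above, using the protoc-generated C++ classes: a 2x3 DT_FLOAT tensor whose single float_val element logically stands for all six entries.

#include "tensor.pb.h"  // generated by protoc from this file

int main() {
  tensorflow::TensorProto tensor;
  tensor.set_dtype(tensorflow::DT_FLOAT);
  tensor.set_version_number(0);

  // Shape 2x3; see tensor_shape.proto for TensorShapeProto.
  tensorflow::TensorShapeProto* shape = tensor.mutable_tensor_shape();
  shape->add_dim()->set_size(2);
  shape->add_dim()->set_size(3);

  // In version 0, one repeated element is repeated to fill the shape.
  tensor.add_float_val(1.0f);
  return 0;
}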

Some files were not shown because too many files have changed in this diff