Added Halide backend support for deep learning layers
dkurt committed Jun 15, 2017
1 parent 6f4179d commit fc18830
Showing 26 changed files with 3,112 additions and 7 deletions.
127 changes: 127 additions & 0 deletions modules/dnn/include/opencv2/dnn/dnn.hpp
@@ -55,6 +55,23 @@ namespace dnn //! This namespace is used for dnn module functionality.

typedef std::vector<int> MatShape;

/**
* @brief Enum of computation backends supported by layers.
*/
enum Backend
{
DNN_BACKEND_DEFAULT,
DNN_BACKEND_HALIDE
};

/**
* @brief Enum of target devices for computations.
*/
enum Target
{
DNN_TARGET_CPU
};

/** @brief Initialize dnn module and built-in layers.
*
* This function is automatically called on most OpenCV builds,
@@ -77,6 +94,54 @@ namespace dnn //! This namespace is used for dnn module functionality.
String type; //!< Type name which was used for creating layer by layer factory (optional).
};

/**
* @brief Derivatives of this class encapsulate functions of certain backends.
*/
class BackendNode
{
public:
BackendNode(int backendId);

virtual ~BackendNode(); //!< Virtual destructor to enable polymorphism.

int backendId; //!< Backend identifier.
};

/**
* @brief Derivatives of this class wrap cv::Mat for different backends and targets.
*/
class BackendWrapper
{
public:
BackendWrapper(int backendId, int targetId);

/**
* @brief Wrap cv::Mat for a specific backend and target.
* @param[in] targetId Target identifier.
* @param[in] m cv::Mat for wrapping.
*
* Makes a CPU->GPU data transfer if it is required for the target.
*/
BackendWrapper(int targetId, const cv::Mat& m);

/**
* @brief Make a wrapper for a reused cv::Mat.
* @param[in] base Wrapper of the cv::Mat that will be reused.
* @param[in] shape Specific shape.
*
* Initializes the wrapper from another one. It wraps the same host CPU
* memory and must not allocate memory on the device (i.e. GPU). It may
* have a different shape. Use it when CPU memory is reused so that the
* associated device memory is reused too.
*/
BackendWrapper(const Ptr<BackendWrapper>& base, const MatShape& shape);

virtual ~BackendWrapper(); //!< Virtual destructor to enable polymorphism.

int backendId; //!< Backend identifier.
int targetId; //!< Target identifier.
};
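
// Illustrative sketch (not part of this header): a backend-specific wrapper
// derived from BackendWrapper might look roughly like this for a
// single-channel CV_32F Mat. The HalideBufferWrapper name and the direct use
// of Halide::Buffer are assumptions made for illustration only.
//
//   class HalideBufferWrapper : public BackendWrapper
//   {
//   public:
//       HalideBufferWrapper(int targetId, const cv::Mat& m)
//           : BackendWrapper(DNN_BACKEND_HALIDE, targetId),
//             buffer((float*)m.data, m.cols, m.rows)  // wrap host memory, no copy
//       {}
//
//       Halide::Buffer<float> buffer;  // CPU target: no device transfer needed
//   };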

/** @brief This interface class allows one to build new Layers - the building blocks of networks.
*
* Each class derived from Layer must implement the allocate() method to declare its own outputs and forward() to compute them.
@@ -131,6 +196,50 @@ namespace dnn //! This namespace is used for dnn module functionality.
*/
virtual int outputNameToIndex(String outputName);

/**
* @brief Asks the layer whether it supports a specific backend for computations.
* @param[in] backendId computation backend identifier.
* @see Backend
*/
virtual bool supportBackend(int backendId);
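
// Illustrative sketch (not part of this header): a derived layer would
// typically override supportBackend() roughly as follows; MyLayer and the
// haveHalide() check are assumed names used for illustration only.
//
//   bool MyLayer::supportBackend(int backendId)
//   {
//       return backendId == DNN_BACKEND_DEFAULT ||
//              (backendId == DNN_BACKEND_HALIDE && haveHalide());
//   }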

/**
* @brief Returns a Halide backend node.
* @param[in] inputs Input Halide buffers.
* @see BackendNode, BackendWrapper
*
* Input buffers should be exactly the same ones that will be used in forward invocations.
* Although we could use a Halide::ImageParam based on the input shape only,
* this helps prevent some memory management issues (if something goes wrong,
* the Halide tests will fail).
*/
virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &inputs);
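
// Illustrative sketch (not part of this header): a possible override for a
// simple element-wise layer. The halideBuffer() helper and HalideBackendNode
// type are assumptions used for illustration only.
//
//   Ptr<BackendNode> MyReLULayer::initHalide(const std::vector<Ptr<BackendWrapper> > &inputs)
//   {
//       Halide::Buffer<float> input = halideBuffer(inputs[0]);
//       Halide::Var x("x"), y("y"), c("c"), n("n");
//       Halide::Func top("my_relu_output");
//       top(x, y, c, n) = Halide::max(input(x, y, c, n), 0.0f);
//       return Ptr<BackendNode>(new HalideBackendNode(top));
//   }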

/**
* @brief Automatic Halide scheduling based on layer hyper-parameters.
* @param[in] node Backend node with Halide functions.
* @param[in] inputs Blobs that will be used in forward invocations.
* @param[in] outputs Blobs that will be used in forward invocations.
* @see BackendNode
*
* The layer does not use its own Halide::Func members because layer fusing
* may have been applied. In that case the fused function should be scheduled.
*/
virtual void applyHalideScheduler(Ptr<BackendNode>& node,
const std::vector<Mat*> &inputs,
const std::vector<Mat> &outputs) const;
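
// Illustrative sketch (not part of this header): the automatic schedule is
// expressed with ordinary Halide scheduling calls on the node's function.
// The halideFunc(node) accessor below is an assumed helper, shown only to
// convey the idea.
//
//   Halide::Func& top = halideFunc(node);
//   Halide::Var x("x"), y("y");
//   top.vectorize(x, 8)   // vectorize along the innermost spatial axis
//      .parallel(y);      // run rows in parallel on the CPU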

/**
* @brief Implements layer fusing.
* @param[in] node Backend node of bottom layer.
* @see BackendNode
*
* Relevant for graph-based backends. If the layer is attached successfully,
* this returns a non-empty cv::Ptr to a node of the same backend.
* Fusing is done only over the last function.
*/
virtual Ptr<BackendNode> tryAttach(const Ptr<BackendNode>& node);
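
// Illustrative sketch (not part of this header): a layer that performs no
// fusion can keep the default behaviour and return an empty node.
//
//   Ptr<BackendNode> MyLayer::tryAttach(const Ptr<BackendNode>& node)
//   {
//       return Ptr<BackendNode>();  // no fusion performed
//   }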

virtual bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
@@ -251,6 +360,24 @@ namespace dnn //! This namespace is used for dnn module functionality.
/** @overload */
void forwardOpt(const std::vector<LayerId> &toLayers);

/**
* @brief Compile Halide layers.
* @param[in] scheduler Path to YAML file with scheduling directives.
* @see setPreferableBackend
*
* Schedules layers that support the Halide backend, then compiles them for a
* specific target. For layers that are not represented in the scheduling file,
* or if no manual scheduling is used at all, automatic scheduling is applied.
*/
void compileHalide(const std::string& scheduler = "");

/**
* @brief Asks the network to use a specific computation backend where it is supported.
* @param[in] backendId backend identifier.
* @see Backend
*/
void setPreferableBackend(int backendId);
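
// Illustrative usage of the two new Net methods above (mirrors the perf
// tests added later in this commit); file names and the "prob" layer are
// placeholders.
//
//   Mat input(227, 227, CV_32FC3);  // some BGR input image
//   Net net = readNetFromCaffe("net.prototxt", "net.caffemodel");
//   net.setBlob("", blobFromImage(input, 1.0, false));
//   net.setPreferableBackend(DNN_BACKEND_HALIDE);
//   net.compileHalide();            // automatic scheduling
//   net.forward(net.getLayerId("prob"));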

/** @brief Sets the new value for the layer output blob
* @param outputName descriptor of the updating layer output blob.
* @param blob new blob.
141 changes: 141 additions & 0 deletions modules/dnn/perf/perf_halide_net.cpp
@@ -0,0 +1,141 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2017, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.

namespace cvtest
{

#ifdef HAVE_HALIDE
using namespace cv;
using namespace dnn;

static void loadNet(const std::string& weights, const std::string& proto,
const std::string& scheduler, int inWidth, int inHeight,
const std::string& outputLayer, const std::string& framework,
int targetId, Net* net, int* outputLayerId)
{
Mat input(inHeight, inWidth, CV_32FC3);
randu(input, 0.0f, 1.0f);

if (framework == "caffe")
{
*net = cv::dnn::readNetFromCaffe(proto, weights);
}
else if (framework == "torch")
{
*net = cv::dnn::readNetFromTorch(weights);
}
else if (framework == "tensorflow")
{
*net = cv::dnn::readNetFromTensorflow(weights);
}
else
CV_Error(Error::StsNotImplemented, "Unknown framework " + framework);

net->setBlob("", cv::dnn::blobFromImage(input, 1.0, false));
net->setPreferableBackend(DNN_BACKEND_HALIDE);
net->compileHalide(scheduler);
*outputLayerId = net->getLayerId(outputLayer);
net->forward(*outputLayerId);
}

PERF_TEST(GoogLeNet, HalidePerfTest)
{
Net net;
int outputLayerId;
loadNet(findDataFile("dnn/bvlc_googlenet.caffemodel"),
findDataFile("dnn/bvlc_googlenet.prototxt"),
"", 227, 227, "prob", "caffe", DNN_TARGET_CPU, &net, &outputLayerId);

TEST_CYCLE_N(10)
{
net.forward(outputLayerId);
}
SANITY_CHECK_NOTHING();
}

PERF_TEST(AlexNet, HalidePerfTest)
{
Net net;
int outputLayerId;
loadNet(findDataFile("dnn/bvlc_alexnet.caffemodel"),
findDataFile("dnn/bvlc_alexnet.prototxt"),
findDataFile("dnn/halide_scheduler_alexnet.yml"),
227, 227, "prob", "caffe", DNN_TARGET_CPU, &net, &outputLayerId);

TEST_CYCLE_N(10)
{
net.forward(outputLayerId);
}
SANITY_CHECK_NOTHING();
}

// PERF_TEST(ResNet50, HalidePerfTest)
// {
// Net net;
// int outputLayerId;
// loadNet(findDataFile("dnn/ResNet-50-model.caffemodel"),
// findDataFile("dnn/ResNet-50-deploy.prototxt"),
// findDataFile("dnn/halide_scheduler_resnet_50.yml"),
// 224, 224, "prob", "caffe", DNN_TARGET_CPU, &net, &outputLayerId);
//
// TEST_CYCLE_N(10)
// {
// net.forward(outputLayerId);
// }
// SANITY_CHECK_NOTHING();
// }

// PERF_TEST(SqueezeNet_v1_1, HalidePerfTest)
// {
// Net net;
// int outputLayerId;
// loadNet(findDataFile("dnn/squeezenet_v1_1.caffemodel"),
// findDataFile("dnn/squeezenet_v1_1.prototxt"),
// findDataFile("dnn/halide_scheduler_squeezenet_v1_1.yml"),
// 227, 227, "prob", "caffe", DNN_TARGET_CPU, &net, &outputLayerId);
//
// TEST_CYCLE_N(10)
// {
// net.forward(outputLayerId);
// }
// SANITY_CHECK_NOTHING();
// }

PERF_TEST(Inception_5h, HalidePerfTest)
{
Net net;
int outputLayerId;
loadNet(findDataFile("dnn/tensorflow_inception_graph.pb"), "",
findDataFile("dnn/halide_scheduler_inception_5h.yml"),
224, 224, "softmax2", "tensorflow", DNN_TARGET_CPU,
&net, &outputLayerId);

TEST_CYCLE_N(10)
{
net.forward(outputLayerId);
}
SANITY_CHECK_NOTHING();
}

PERF_TEST(ENet, HalidePerfTest)
{
Net net;
int outputLayerId;
loadNet(findDataFile("dnn/Enet-model-best.net"), "",
findDataFile("dnn/halide_scheduler_enet.yml"),
512, 256, "l367_Deconvolution", "torch", DNN_TARGET_CPU,
&net, &outputLayerId);

TEST_CYCLE_N(10)
{
net.forward(outputLayerId);
}
SANITY_CHECK_NOTHING();
}
#endif // HAVE_HALIDE

} // namespace cvtest
11 changes: 10 additions & 1 deletion modules/dnn/perf/perf_main.cpp
@@ -1,3 +1,12 @@
#include "perf_precomp.hpp"

CV_PERF_TEST_MAIN(dnn)
static const char* extraTestDataPath =
#ifdef WINRT
NULL;
#else
getenv("OPENCV_DNN_TEST_DATA_PATH");
#endif

CV_PERF_TEST_MAIN(dnn,
extraTestDataPath ? (void)cvtest::addDataSearchPath(extraTestDataPath) : (void)0
)