Commit 2c28d87

Added Halide backend support for deep learning layers

1 parent 09b73b2

26 files changed (+2958, -9 lines)

modules/dnn/include/opencv2/dnn/dnn.hpp

Lines changed: 95 additions & 0 deletions
@@ -55,6 +55,23 @@ namespace dnn //! This namespace is used for dnn module functionality.
 
     typedef std::vector<int> MatShape;
 
+    /**
+     * @brief Enum of computation backends supported by layers.
+     */
+    enum Backend
+    {
+        DNN_BACKEND_DEFAULT,
+        DNN_BACKEND_HALIDE
+    };
+
+    /**
+     * @brief Enum of target devices for computations.
+     */
+    enum Target
+    {
+        DNN_TARGET_CPU
+    };
+
     /** @brief Initialize dnn module and built-in layers.
      *
      * This function is automatically called on most OpenCV builds,
@@ -77,6 +94,20 @@ namespace dnn //! This namespace is used for dnn module functionality.
         String type; //!< Type name which was used for creating layer by layer factory (optional).
     };
 
+    /**
+     * @brief This structure encapsulates pointers to the functions of a specific backend.
+     */
+    struct BackendNode
+    {
+        BackendNode() {}
+
+        BackendNode(void* func) : funcs(1, func) {}
+
+        BackendNode(const std::vector<void*>& functions) : funcs(functions) {}
+
+        std::vector<void*> funcs; //!< Vector with pointers to backend functions.
+    };
+
     /** @brief This interface class allows building new Layers, which are the building blocks of networks.
      *
      * Each class derived from Layer must implement allocate() to declare its own outputs and forward() to compute them.
@@ -131,6 +162,51 @@ namespace dnn //! This namespace is used for dnn module functionality.
          */
         virtual int outputNameToIndex(String outputName);
 
+        /**
+         * @brief Asks whether the layer supports a specific backend for computations.
+         * @param[in] backendId computation backend identifier.
+         * @see Backend
+         */
+        virtual bool supportBackend(int backendId);
+
+        /**
+         * @brief Returns a Halide backend node.
+         * @param[in] inputs Pointers to input Halide buffers.
+         * @see BackendNode
+         *
+         * Input buffers should be exactly the ones that will be used in forward
+         * invocations. Although a Halide::ImageParam could be created from the
+         * input shape alone, passing the actual buffers helps prevent memory
+         * management issues (if something goes wrong, the Halide tests will fail).
+         */
+        virtual BackendNode initHalide(const std::vector<void*> &inputs);
+
+        /**
+         * @brief Automatic Halide scheduling based on layer hyper-parameters.
+         * @param[in] node Backend node with Halide functions.
+         * @param[in] inputs Blobs that will be used in forward invocations.
+         * @param[in] outputs Blobs that will be used in forward invocations.
+         * @see BackendNode
+         *
+         * The layer does not schedule its own Halide::Func members because layer
+         * fusion may have been applied, in which case the fused function is the
+         * one that must be scheduled.
+         */
+        virtual void applyHalideScheduler(BackendNode& node,
+                                          const std::vector<Mat*> &inputs,
+                                          const std::vector<Mat> &outputs) const;
+
+        /**
+         * @brief Implements layer fusion.
+         * @param[in] backendId Specific backend identifier.
+         * @param node Backend node of the bottom layer.
+         * @see BackendNode
+         *
+         * Relevant for graph-based backends. Returns true if the layer was
+         * attached successfully. Only the last function is fused, and the fused
+         * function replaces the original one in the same node.
+         */
+        virtual bool tryAttach(int backendId, BackendNode& node);
+
         virtual bool getMemoryShapes(const std::vector<MatShape> &inputs,
                                      const int requiredOutputs,
                                      std::vector<MatShape> &outputs,
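
To make the contract of these hooks concrete, here is a minimal hypothetical layer that advertises Halide support and builds a ReLU pipeline in initHalide(). Everything here is illustrative: the layer name, the assumption that each element of inputs points to a 4-D Halide::Buffer<float>, and the simplified memory handling (real layers manage the Halide::Func lifetime in the backend wrapper). The remaining required Layer overrides are omitted.

    #ifdef HAVE_HALIDE
    #include <Halide.h>
    #include <opencv2/dnn.hpp>

    using namespace cv::dnn;

    class MyReluLayer : public Layer  // hypothetical example layer
    {
    public:
        virtual bool supportBackend(int backendId)
        {
            // Claim the default backend plus Halide.
            return backendId == DNN_BACKEND_DEFAULT ||
                   backendId == DNN_BACKEND_HALIDE;
        }

        virtual BackendNode initHalide(const std::vector<void*> &inputs)
        {
            // Assumption: the void* points to the Halide::Buffer<float>
            // that forward() will actually use, as the note above requires.
            Halide::Buffer<float> input = *(Halide::Buffer<float>*)inputs[0];

            Halide::Var x("x"), y("y"), c("c"), n("n");
            Halide::Func top("my_relu");
            top(x, y, c, n) = Halide::max(input(x, y, c, n), 0.0f);

            // BackendNode keeps the function behind a type-erased pointer;
            // ownership is left out of this sketch.
            return BackendNode(new Halide::Func(top));
        }

        // allocate()/forward() and other required Layer overrides omitted.
    };
    #endif  // HAVE_HALIDE
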
@@ -251,6 +327,25 @@ namespace dnn //! This namespace is used for dnn module functionality.
         /** @overload */
         void forwardOpt(const std::vector<LayerId> &toLayers);
 
+        /**
+         * @brief Compile Halide layers.
+         * @param[in] targetId Compilation target of the Halide code.
+         * @param[in] scheduler Path to a YAML file with scheduling directives.
+         * @see Target
+         *
+         * Schedules the layers that support the Halide backend, then compiles
+         * them for the specified target. Automatic scheduling is applied to
+         * layers that are not covered by the scheduling file, or to all layers
+         * if no manual scheduling is used.
+         */
+        void compileHalide(int targetId = DNN_TARGET_CPU, const std::string& scheduler = "");
+
+        /**
+         * @brief Asks the network to use a specific computation backend where it is supported.
+         * @param[in] backendId backend identifier.
+         * @see Backend
+         */
+        void setPreferableBackend(int backendId);
+
         /** @brief Sets the new value for the layer output blob
          * @param outputName descriptor of the layer output blob to update.
          * @param blob new blob.
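
On the user side, the new Net methods combine into the following pattern. This is a minimal sketch distilled from the perf tests in the next file; the model files and the output layer name are placeholders:

    #include <opencv2/dnn.hpp>

    int main()
    {
        using namespace cv;
        using namespace cv::dnn;

        // Placeholder model files; any supported framework loader works the same way.
        Net net = readNetFromCaffe("net.prototxt", "net.caffemodel");

        Mat input(227, 227, CV_32FC3);
        randu(input, 0.0f, 1.0f);
        net.setBlob("", blobFromImage(input, 1.0, false));

        net.setPreferableBackend(DNN_BACKEND_HALIDE);  // prefer Halide where layers support it
        net.compileHalide(DNN_TARGET_CPU);             // schedule and compile; YAML scheduler optional
        net.forward(net.getLayerId("prob"));           // run through the compiled pipeline
        return 0;
    }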

modules/dnn/perf/perf_halide_net.cpp

Lines changed: 139 additions & 0 deletions
@@ -0,0 +1,139 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+//
+// Copyright (C) 2017, Intel Corporation, all rights reserved.
+// Third party copyrights are property of their respective owners.
+
+namespace cvtest
+{
+
+#ifdef HAVE_HALIDE
+using namespace cv;
+using namespace dnn;
+
+static void loadNet(const std::string& weights, const std::string& proto,
+                    const std::string& scheduler, int inWidth, int inHeight,
+                    const std::string& outputLayer, const std::string& framework,
+                    Net* net, int* outputLayerId)
+{
+    Mat input(inHeight, inWidth, CV_32FC3);
+    randu(input, 0.0f, 1.0f);
+
+    if (framework == "caffe")
+    {
+        *net = cv::dnn::readNetFromCaffe(proto, weights);
+    }
+    else if (framework == "torch")
+    {
+        *net = cv::dnn::readNetFromTorch(weights);
+    }
+    else if (framework == "tensorflow")
+    {
+        *net = cv::dnn::readNetFromTensorflow(weights);
+    }
+    else
+        CV_Error(Error::StsNotImplemented, "Unknown framework " + framework);
+
+    net->setBlob("", cv::dnn::blobFromImage(input, 1.0, false));
+    net->setPreferableBackend(DNN_BACKEND_HALIDE);
+    net->compileHalide(DNN_TARGET_CPU, scheduler);  // pass the manual scheduling file, if any
+    *outputLayerId = net->getLayerId(outputLayer);
+    net->forward(*outputLayerId);  // warm-up run
+}
+
+PERF_TEST(GoogLeNet, HalidePerfTest)
+{
+    Net net;
+    int outputLayerId;
+    loadNet(findDataFile("dnn/bvlc_googlenet.caffemodel"),
+            findDataFile("dnn/bvlc_googlenet.prototxt"),
+            "", 227, 227, "prob", "caffe", &net, &outputLayerId);
+
+    TEST_CYCLE_N(10)
+    {
+        net.forward(outputLayerId);
+    }
+    SANITY_CHECK_NOTHING();
+}
+
+PERF_TEST(AlexNet, HalidePerfTest)
+{
+    Net net;
+    int outputLayerId;
+    loadNet(findDataFile("dnn/bvlc_alexnet.caffemodel"),
+            findDataFile("dnn/bvlc_alexnet.prototxt"),
+            findDataFile("dnn/halide_scheduler_alexnet.yml"),
+            227, 227, "prob", "caffe", &net, &outputLayerId);
+
+    TEST_CYCLE_N(10)
+    {
+        net.forward(outputLayerId);
+    }
+    SANITY_CHECK_NOTHING();
+}
+
+// PERF_TEST(ResNet50, HalidePerfTest)
+// {
+//     Net net;
+//     int outputLayerId;
+//     loadNet(findDataFile("dnn/ResNet-50-model.caffemodel"),
+//             findDataFile("dnn/ResNet-50-deploy.prototxt"),
+//             findDataFile("dnn/halide_scheduler_resnet_50.yml"),
+//             224, 224, "prob", "caffe", &net, &outputLayerId);
+//
+//     TEST_CYCLE_N(10)
+//     {
+//         net.forward(outputLayerId);
+//     }
+//     SANITY_CHECK_NOTHING();
+// }
+
+// PERF_TEST(SqueezeNet_v1_1, HalidePerfTest)
+// {
+//     Net net;
+//     int outputLayerId;
+//     loadNet(findDataFile("dnn/squeezenet_v1_1.caffemodel"),
+//             findDataFile("dnn/squeezenet_v1_1.prototxt"),
+//             findDataFile("dnn/halide_scheduler_squeezenet_v1_1.yml"),
+//             227, 227, "prob", "caffe", &net, &outputLayerId);
+//
+//     TEST_CYCLE_N(10)
+//     {
+//         net.forward(outputLayerId);
+//     }
+//     SANITY_CHECK_NOTHING();
+// }
+
+PERF_TEST(Inception_5h, HalidePerfTest)
+{
+    Net net;
+    int outputLayerId;
+    loadNet(findDataFile("dnn/tensorflow_inception_graph.pb"), "",
+            findDataFile("dnn/halide_scheduler_inception_5h.yml"),
+            224, 224, "softmax2", "tensorflow", &net, &outputLayerId);
+
+    TEST_CYCLE_N(10)
+    {
+        net.forward(outputLayerId);
+    }
+    SANITY_CHECK_NOTHING();
+}
+
+PERF_TEST(ENet, HalidePerfTest)
+{
+    Net net;
+    int outputLayerId;
+    loadNet(findDataFile("dnn/Enet-model-best.net"), "",
+            findDataFile("dnn/halide_scheduler_enet.yml"),
+            512, 256, "l367_Deconvolution", "torch", &net, &outputLayerId);
+
+    TEST_CYCLE_N(10)
+    {
+        net.forward(outputLayerId);
+    }
+    SANITY_CHECK_NOTHING();
+}
+#endif  // HAVE_HALIDE
+
+} // namespace cvtest

modules/dnn/perf/perf_main.cpp

Lines changed: 10 additions & 1 deletion
@@ -1,3 +1,12 @@
 #include "perf_precomp.hpp"
 
-CV_PERF_TEST_MAIN(dnn)
+static const char* extraTestDataPath =
+#ifdef WINRT
+        NULL;
+#else
+        getenv("OPENCV_DNN_TEST_DATA_PATH");
+#endif
+
+CV_PERF_TEST_MAIN(dnn,
+    extraTestDataPath ? (void)cvtest::addDataSearchPath(extraTestDataPath) : (void)0
+)
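
With this change, pointing the OPENCV_DNN_TEST_DATA_PATH environment variable at a directory of model files registers it as an extra test data search path, so the findDataFile() calls in the perf tests above can resolve models that are not present in the default test data location. A minimal sketch (the model path is just an example):

    // Assumes the perf binary was started with OPENCV_DNN_TEST_DATA_PATH set;
    // addDataSearchPath() above has registered that directory, and
    // findDataFile() now searches it in addition to the default locations.
    std::string model = cvtest::findDataFile("dnn/bvlc_googlenet.caffemodel");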
