This repository was archived by the owner on May 11, 2024. It is now read-only.

Commit 2a9d5b5

Author: Tony Reina
Commit message: Updating openvino scripts
Parent: 4492d1f

File tree: 3 files changed (+74, −37 lines)


single-node/openvino_saved_model/README.md

Lines changed: 19 additions & 5 deletions
@@ -1,4 +1,4 @@
-# Intel [OpenVINO](https://software.intel.com/en-us/openvino-toolkit) integration
+# Intel [OpenVINO](https://software.intel.com/en-us/openvino-toolkit) integration
 
 ### How to freeze a saved TensorFlow/Keras model and convert it to OpenVINO format
 

@@ -25,9 +25,9 @@ The CONDA_PREFIX should be something like /home/bduser/anaconda3/envs/tf112_mkl_
 It refers to where your Conda packages are installed for this environment.
 It'd be nice if there were an easier way to find freeze_graph.py
 
-`python ${CONDA_PREFIX}/lib/python3.6/site-packages/tensorflow/python/tools/freeze_graph.py
---input_saved_model_dir saved_2dunet_model_protobuf/
---output_node_names "PredictionMask/Sigmoid"
+`python ${CONDA_PREFIX}/lib/python3.6/site-packages/tensorflow/python/tools/freeze_graph.py
+--input_saved_model_dir saved_2dunet_model_protobuf/
+--output_node_names "PredictionMask/Sigmoid"
 --output_graph saved_model_frozen.pb
 --output_dir frozen_model
 `
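A quick sanity check before handing the frozen graph to the Model Optimizer is to confirm that the node named in `--output_node_names` actually exists in `saved_model_frozen.pb`. A minimal sketch, assuming the TensorFlow 1.x API implied by the `python3.6`/TF 1.12 environment above:

```python
# Load the frozen GraphDef and verify the expected output node is present.
# Path and node name come from the freeze_graph command above.
import tensorflow as tf

with tf.gfile.GFile("frozen_model/saved_model_frozen.pb", "rb") as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())

node_names = [node.name for node in graph_def.node]
assert "PredictionMask/Sigmoid" in node_names, "output node missing from frozen graph"
print("Frozen graph contains {} nodes".format(len(node_names)))
```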
@@ -45,6 +45,20 @@ Then,
 `python ${INTEL_CVSDK_DIR}/deployment_tools/model_optimizer/mo_tf.py --input_model ../frozen_model/saved_model_frozen.pb --input_shape=[1,144,144,4] --data_type FP32 --output_dir FP32 --model_name saved_model
 `
 
-4. Run the script `python create_validation_sample.py` which will select a few samples from the HDF5 datafile and save them to a separate NumPy datafile called `validation_data.npz`. The inference scripts will use this NumPy file.
+4. Run the script `python create_validation_sample.py` which will select a few samples from the HDF5 datafile and save them to a separate NumPy datafile called `validation_data.npz`. The inference scripts will use this NumPy file.
 
 5. The scripts `inference_keras.py` and `inference_openvino.py` load the `validation_data.npz` data file and run inference. Add the `--plot` argument to the command line and the script will plot figures for each prediction.
+
+NOTE: The baseline model uses UpSampling2D (bilinear interpolation). This is supported on OpenVINO via a shared TensorFlow MKL-DNN library. To build the library, run the script:
+
+`bash ${INTEL_CVSDK_DIR}/inference_engine/samples/build_samples.sh`
+
+This builds all of the OpenVINO shared libraries on your system under the directory `${INTEL_CVSDK_DIR}/inference_engine/lib`. For CPU you'll need to link to `libcpu_extension_avx2.so`. For example,
+
+`python inference_openvino.py -l ${INTEL_CVSDK_DIR}/inference_engine/lib/centos_7.4/intel64/libcpu_extension_avx2.so`
+
+or
+
+`python inference_openvino.py -l ${INTEL_CVSDK_DIR}/inference_engine/lib/ubuntu16.04/intel64/libcpu_extension_avx2.so`
+
+depending on your operating system.
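For orientation, the load-and-infer flow these instructions drive looks roughly like this with the 2018-era Inference Engine Python API (`IEPlugin`/`IENetwork`, the same calls visible in the `inference_openvino.py` diff further down). The extension path and IR filenames here are examples, not fixed values:

```python
# Minimal sketch of the Inference Engine flow: plugin + CPU extension + IR model.
import os
import numpy as np
from openvino.inference_engine import IENetwork, IEPlugin

plugin = IEPlugin(device="CPU")
# The CPU extension supplies the Resize_Bilinear (UpSampling2D) kernels.
plugin.add_cpu_extension(os.path.expandvars(
    "${INTEL_CVSDK_DIR}/inference_engine/lib/centos_7.4/intel64/libcpu_extension_avx2.so"))

net = IENetwork(model="FP32/saved_model.xml", weights="FP32/saved_model.bin")
input_blob = next(iter(net.inputs))  # name of the input layer
exec_net = plugin.load(network=net)

# Run one inference on zeros just to prove the pipeline works end to end.
dummy = np.zeros(net.inputs[input_blob].shape, dtype=np.float32)
result = exec_net.infer(inputs={input_blob: dummy})
```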

single-node/openvino_saved_model/inference_keras.py

Lines changed: 1 addition & 1 deletion
@@ -34,7 +34,7 @@
     description="Inference example for trained 2D U-Net model on BraTS.",
     add_help=True, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
 
-parser.add_argument("--inference_filename", default="../output/unet_model_for_inference.hdf5",
+parser.add_argument("--inference_filename", default="../output/unet_model_for_decathlon.hdf5",
                     help="the Keras inference model filename")
 
 args = parser.parse_args()
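The only change here is the default checkpoint name. For a different checkpoint, pass `--inference_filename` explicitly. A hypothetical sketch of the load step the script performs (the `compile=False` shortcut is an assumption to avoid re-supplying custom loss/metric objects; the actual script may pass them via `custom_objects` instead):

```python
# Hypothetical: load the Keras HDF5 model named by --inference_filename.
from keras.models import load_model

model = load_model("../output/unet_model_for_decathlon.hdf5", compile=False)
model.summary()
```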

single-node/openvino_saved_model/inference_openvino.py

Lines changed: 54 additions & 31 deletions
@@ -15,6 +15,17 @@
 limitations under the License.
 """
 
+"""
+OpenVINO Python Inference Script
+This will load the OpenVINO version of the model (IR)
+and perform inference on a few validation samples
+from the Decathlon dataset.
+
+You'll need the extension library to handle the Resize_Bilinear operations.
+
+python inference_openvino.py -l ${INTEL_CVSDK_DIR}/inference_engine/lib/centos_7.4/intel64/libcpu_extension_avx2.so
+
+"""
 
 import sys
 import os
@@ -111,13 +122,39 @@ def load_model(fp16=False):
 
     return model_xml, model_bin
 
+def print_stats(exec_net, input_data, n_channels, batch_size, input_blob, out_blob, args):
+
+    # Start sync inference
+    log.info("Starting inference ({} iterations)".format(args.number_iter))
+    infer_time = []
+    for i in range(args.number_iter):
+        t0 = time()
+        res = exec_net.infer(inputs={input_blob: input_data[[0], :n_channels]})
+        infer_time.append((time() - t0) * 1000)
+
+    average_inference = np.average(np.asarray(infer_time))
+    log.info("Average running time of one batch: {:.5f} ms".format(average_inference))
+    log.info("Images per second = {:.3f}".format(batch_size * 1000.0 / average_inference))
+
+    perf_counts = exec_net.requests[0].get_perf_counts()
+    log.info("Performance counters:")
+    log.info("{:<70} {:<15} {:<15} {:<15} {:<10}".format("name",
+                                                         "layer_type",
+                                                         "exec_type",
+                                                         "status",
+                                                         "real_time, us"))
+    for layer, stats in perf_counts.items():
+        log.info("{:<70} {:<15} {:<15} {:<15} {:<10}".format(layer,
+                                                             stats["layer_type"],
+                                                             stats["exec_type"],
+                                                             stats["status"],
+                                                             stats["real_time"]))
+
 
 def build_argparser():
     parser = ArgumentParser()
     parser.add_argument("-number_iter", "--number_iter",
                         help="Number of iterations", default=5, type=int)
-    parser.add_argument("-perf_counts", "--perf_counts",
-                        help="Performance counts", default=1, type=int)
     parser.add_argument("-l", "--cpu_extension",
                         help="MKLDNN (CPU)-targeted custom layers. "
                              "Absolute path to a shared library with "
@@ -132,8 +169,11 @@ def build_argparser():
                         type=str)
     parser.add_argument("-plot", "--plot", help="Plot results",
                         default=False, action="store_true")
+    parser.add_argument("-stats", "--stats", help="Print the runtime statistics",
+                        default=False, action="store_true")
     return parser
 
+
 def main():
 
     log.basicConfig(format="[ %(levelname)s ] %(message)s",
@@ -152,7 +192,8 @@ def main():
 
     log.info("Loading network files:\n\t{}\n\t{}".format(model_xml, model_bin))
     net = IENetwork(model=model_xml, weights=model_bin)
-    #net = IENetwork.from_ir(model=model_xml, weights=model_bin)
+    #net = IENetwork.from_ir(model=model_xml, weights=model_bin)  # Old API
+
     if "CPU" in plugin.device:
         supported_layers = plugin.get_supported_layers(net)
         not_supported_layers = [l for l in net.layers.keys() if l not in supported_layers]
@@ -163,14 +204,17 @@ def main():
             log.error("Please try to specify cpu extensions library path "
                       "in sample's command line parameters using -l "
                       "or --cpu_extension command line argument")
+            log.error("On CPU this is usually -l ${INTEL_CVSDK_DIR}/inference_engine/lib/centos_7.4/intel64/libcpu_extension_avx2.so")
+            log.error("You may need to build the OpenVINO samples for this library to be created on your system.")
+            log.error("e.g. bash ${INTEL_CVSDK_DIR}/inference_engine/samples/build_samples.sh will trigger the library to be built.")
+            log.error("Replace 'centos_7.4' with the directory name for your operating system, e.g. 'ubuntu_16.04'.")
             sys.exit(1)
 
     assert len(net.inputs.keys()) == 1, "Sample supports only single input topologies"
     assert len(net.outputs) == 1, "Sample supports only single output topologies"
 
-    log.info("Preparing input blobs")
-    input_blob = next(iter(net.inputs))
-    out_blob = next(iter(net.outputs))
+    input_blob = next(iter(net.inputs))   # Name of the input layer
+    out_blob = next(iter(net.outputs))    # Name of the output layer
 
     batch_size, n_channels, height, width = net.inputs[input_blob].shape
     net.batch_size = batch_size
@@ -182,31 +226,10 @@ def main():
     exec_net = plugin.load(network=net)
     del net
 
-    # Start sync inference
-    log.info("Starting inference ({} iterations)".format(args.number_iter))
-    infer_time = []
-    for i in range(args.number_iter):
-        t0 = time()
-        res = exec_net.infer(inputs={input_blob: input_data[[0]]})
-        infer_time.append((time() - t0) * 1000)
-
-    average_inference = np.average(np.asarray(infer_time))
-    log.info("Average running time of one batch: {:.5f} ms".format(average_inference))
-    log.info("Images per second = {:.3f}".format(batch_size * 1000.0 / average_inference))
-    if args.perf_counts:
-        perf_counts = exec_net.requests[0].get_perf_counts()
-        log.info("Performance counters:")
-        log.info("{:<70} {:<15} {:<15} {:<15} {:<10}".format("name",
-                                                             "layer_type",
-                                                             "exec_type",
-                                                             "status",
-                                                             "real_time, us"))
-        for layer, stats in perf_counts.items():
-            log.info("{:<70} {:<15} {:<15} {:<15} {:<10}".format(layer,
-                                                                 stats["layer_type"],
-                                                                 stats["exec_type"],
-                                                                 stats["status"],
-                                                                 stats["real_time"]))
+    if args.stats:
+        # Print the latency and throughput for inference
+        print_stats(exec_net, input_data, n_channels,
+                    batch_size, input_blob, out_blob, args)
 
     # Go through the sample validation dataset to plot predictions
     for idx, img_number in enumerate(img_indicies):
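With these changes the benchmark is opt-in: latency, throughput, and per-layer performance counters print only when `--stats` is given, for example

`python inference_openvino.py -l ${INTEL_CVSDK_DIR}/inference_engine/lib/ubuntu16.04/intel64/libcpu_extension_avx2.so --stats --plot`

with the extension path adjusted for your OS as described in the README above.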
