This is a sample class for a model. You may choose to use it as-is or make any changes to it.
This has been provided just to give you an idea of how to structure your model class.
'''
+ import cv2
+ import numpy as np
+ from openvino.inference_engine import IECore

- class Model_X:
+ class FaceDetectionModel:
    '''
    Class for the Face Detection Model.
    '''
    def __init__(self, model_name, device='CPU', extensions=None):
        '''
        TODO: Use this to set your instance variables.
        '''
-        raise NotImplementedError
+        self.model_name = model_name
+        self.device = device
+        self.extensions = extensions
+        self.model_structure = self.model_name  # path to the model's .xml file
+        self.model_weights = self.model_name.split('.')[0] + '.bin'  # derive the .bin weights path from the .xml path
+        self.plugin = None
+        self.network = None
+        self.exec_net = None
+        self.input_name = None
+        self.input_shape = None
+        self.output_names = None
+        self.output_shape = None

    def load_model(self):
        '''
        TODO: You will need to complete this method.
        This method is for loading the model to the device specified by the user.
        If your model requires any Plugins, this is where you can load them.
        '''
-        raise NotImplementedError
+        # Load the Inference Engine API plugin (the IECore entity)
+        self.plugin = IECore()
+        # Read the network from the IR files: architecture from the .xml file, weights from the .bin file
+        self.network = self.plugin.read_network(model=self.model_structure, weights=self.model_weights)
+        # Query the plugin for the network layers supported on the specified device
+        supported_layers = self.plugin.query_network(network=self.network, device_name=self.device)
+        # Check for unsupported layers
+        unsupported_layers = [ul for ul in self.network.layers.keys() if ul not in supported_layers]
+
+        # If unsupported layers were found and the device is CPU, try to add a CPU extension
+        if len(unsupported_layers) != 0 and self.device == 'CPU':
+            print('unsupported layers found: {}'.format(unsupported_layers))
+            # An extension path was provided
+            if self.extensions is not None:
+                print("Adding cpu_extension")
+                # Load the extension library to the plugin for the specified device
+                self.plugin.add_extension(self.extensions, self.device)
+                # Re-query the supported and unsupported layers
+                supported_layers = self.plugin.query_network(network=self.network, device_name=self.device)
+                unsupported_layers = [ul for ul in self.network.layers.keys() if ul not in supported_layers]
+                # If there are still unsupported layers, exit
+                if len(unsupported_layers) != 0:
+                    print("After adding the extension there are still unsupported layers")
+                    exit(1)
+                print("After adding the extension the issue is resolved")
+            # No extension path was given, so exit
+            else:
+                print("Give the path of the cpu extension")
+                exit(1)
+        # Load the network that was read from the Intermediate Representation (IR) into the plugin for the specified device
+        self.exec_net = self.plugin.load_network(network=self.network, device_name=self.device, num_requests=1)
+
+        # Get the name and shape of the input layer
+        self.input_name = next(iter(self.network.inputs))
+        self.input_shape = self.network.inputs[self.input_name].shape
+        # Get the name and shape of the output layer
+        self.output_names = next(iter(self.network.outputs))
+        self.output_shape = self.network.outputs[self.output_names].shape
+

    def predict(self, image, prob_threshold):
        '''
        TODO: You will need to complete this method.
        This method is meant for running predictions on the input image.
        '''
-        raise NotImplementedError
+        # 1. Preprocess the image
+        img_processed = self.preprocess_input(image.copy())
+        # 2. Run synchronous inference on the executable network; the result is a
+        #    dictionary that maps output layer names to output data
+        outputs = self.exec_net.infer({self.input_name: img_processed})
+        # 3. Process the outputs into bounding-box coordinates
+        coords = self.preprocess_output(outputs, prob_threshold)
+        # If no face was detected, return 0, 0
+        if len(coords) == 0:
+            return 0, 0
+        # Take the first detected face
+        coords = coords[0]
+        h = image.shape[0]
+        w = image.shape[1]
+
+        # Scale the normalized coordinates to the image size and cast to int32
+        coords = coords * np.array([w, h, w, h])
+        coords = coords.astype(np.int32)
+
+        cropped_face = image[coords[1]:coords[3], coords[0]:coords[2]]
+
+        return cropped_face, coords
+

    def check_model(self):
-        raise NotImplementedError
+        # raise NotImplementedError
+        pass

    def preprocess_input(self, image):
        '''
        Before feeding the data into the model for inference,
        you might have to preprocess it. This function is where you can do that.
+        Resize the given input image to the model's expected height and width.
        '''
-        raise NotImplementedError
+        # Resize to the model's input height and width; note that cv2.resize takes (W, H), the opposite order
+        image_resized = cv2.resize(image, (self.input_shape[3], self.input_shape[2]))
+        # Add a "batch" dimension of 1 at the start, then move the channel dimension to be
+        # first, giving the BxCxHxW layout the model expects
+        img_processed = np.transpose(np.expand_dims(image_resized, axis=0), (0, 3, 1, 2))
+
+        return img_processed
+

    def preprocess_output(self, outputs, prob_threshold):
        '''
        Before feeding the output of this model to the next model,
        you might have to preprocess the output. This function is where you can do that.
        '''
-        raise NotImplementedError
+        coords = []
+        # Each detection row is [image_id, label, conf, x_min, y_min, x_max, y_max]
+        outs = outputs[self.output_names][0][0]
+        for out in outs:
+            conf = out[2]
+            if conf > prob_threshold:
+                x_min = out[3]
+                y_min = out[4]
+                x_max = out[5]
+                y_max = out[6]
+                coords.append([x_min, y_min, x_max, y_max])
+        return coords
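
For reference, a minimal sketch of how this class might be used; the model path 'face-detection.xml', the image path 'frame.jpg', and the 0.6 confidence threshold are hypothetical placeholders, not values from this repository.

import cv2

# Hypothetical paths and threshold, shown only to illustrate the call sequence
fd = FaceDetectionModel('face-detection.xml', device='CPU', extensions=None)
fd.load_model()
frame = cv2.imread('frame.jpg')  # OpenCV loads the image as a BGR array
cropped_face, coords = fd.predict(frame, 0.6)
if isinstance(cropped_face, int):  # predict returns 0, 0 when no face passes the threshold
    print('No face detected')
else:
    print('Face bounding box:', coords)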