 
 app = FastAPI()
 
+# Settings
 MIN_CONFIDENCE = 0.1  # The absolute lowest confidence for a detection.
-
+# URL
 FACE_DETECTION_URL = "/v1/vision/face"
-FACE_MODEL = "models/face_detection/mobilenet_ssd_v2_face/mobilenet_ssd_v2_face_quant_postprocess.tflite"
-
 OBJ_DETECTION_URL = "/v1/vision/detection"
+SCENE_URL = "/v1/vision/scene"
+# Models and labels
+FACE_MODEL = "models/face_detection/mobilenet_ssd_v2_face/mobilenet_ssd_v2_face_quant_postprocess.tflite"
 OBJ_MODEL = "models/object_detection/mobilenet_ssd_v2_coco/mobilenet_ssd_v2_coco_quant_postprocess.tflite"
 OBJ_LABELS = "models/object_detection/mobilenet_ssd_v2_coco/coco_labels.txt"
-
-SCENE_URL = "/v1/vision/scene"
 SCENE_MODEL = "models/classification/dogs-vs-cats/model.tflite"
 SCENE_LABELS = "models/classification/dogs-vs-cats/labels.txt"
 
@@ -40,8 +40,8 @@
 face_interpreter.allocate_tensors()
 face_input_details = face_interpreter.get_input_details()
 face_output_details = face_interpreter.get_output_details()
-face_input_height = face_input_details[0]["shape"][1]  # 320
-face_input_width = face_input_details[0]["shape"][2]  # 320
+face_input_height = face_input_details[0]["shape"][1]
+face_input_width = face_input_details[0]["shape"][2]
 
 # Setup face detection
 scene_interpreter = tflite.Interpreter(model_path=SCENE_MODEL)
@@ -55,24 +55,19 @@
 
 @app.get("/")
 async def info():
-    return f"""
-    Object detection model: {OBJ_MODEL.split("/")[-2]}
-    Face detection model: {FACE_MODEL.split("/")[-2]}
-    Scene model: {SCENE_MODEL.split("/")[-2]}
-    """
+    return """tflite-server docs at ip:port/docs"""
 
 
 @app.post(FACE_DETECTION_URL)
 async def predict_face(image: UploadFile = File(...)):
-    data = {"success": False}
     try:
         contents = await image.read()
-        image = Image.open(io.BytesIO(contents))  # A PIL image
+        image = Image.open(io.BytesIO(contents))
         image_width = image.size[0]
         image_height = image.size[1]
 
         # Format data and send to interpreter
-        resized_image = image.resize((face_input_width, face_input_height))
+        resized_image = image.resize((face_input_width, face_input_height), Image.ANTIALIAS)
         input_data = np.expand_dims(resized_image, axis=0)
         face_interpreter.set_tensor(face_input_details[0]["index"], input_data)
 
@@ -82,13 +77,14 @@ async def predict_face(image: UploadFile = File(...)):
         classes = face_interpreter.get_tensor(face_output_details[1]["index"])[0]
         scores = face_interpreter.get_tensor(face_output_details[2]["index"])[0]
 
+        data = {}
         faces = []
         for i in range(len(scores)):
             if not classes[i] == 0:  # Face
                 continue
             single_face = {}
-            single_face["confidence"] = float(scores[i])
             single_face["userid"] = "unknown"
+            single_face["confidence"] = float(scores[i])
             single_face["y_min"] = int(float(boxes[i][0]) * image_height)
             single_face["x_min"] = int(float(boxes[i][1]) * image_width)
             single_face["y_max"] = int(float(boxes[i][2]) * image_height)
@@ -107,15 +103,14 @@ async def predict_face(image: UploadFile = File(...)):
 
 @app.post(OBJ_DETECTION_URL)
 async def predict_object(image: UploadFile = File(...)):
-    data = {"success": False}
     try:
         contents = await image.read()
-        image = Image.open(io.BytesIO(contents))  # A PIL image
+        image = Image.open(io.BytesIO(contents))
         image_width = image.size[0]
         image_height = image.size[1]
 
         # Format data and send to interpreter
-        resized_image = image.resize((obj_input_width, obj_input_height))
+        resized_image = image.resize((obj_input_width, obj_input_height), Image.ANTIALIAS)
         input_data = np.expand_dims(resized_image, axis=0)
         obj_interpreter.set_tensor(obj_input_details[0]["index"], input_data)
 
@@ -125,6 +120,7 @@ async def predict_object(image: UploadFile = File(...)):
         classes = obj_interpreter.get_tensor(obj_output_details[1]["index"])[0]
         scores = obj_interpreter.get_tensor(obj_output_details[2]["index"])[0]
 
+        data = {}
         objects = []
         for i in range(len(scores)):
             single_object = {}
@@ -149,21 +145,13 @@ async def predict_object(image: UploadFile = File(...)):
 
 @app.post(SCENE_URL)
 async def predict_scene(image: UploadFile = File(...)):
-    data = {"success": False}
     try:
         contents = await image.read()
-        image = Image.open(io.BytesIO(contents))  # A PIL image
-        # Format data and send to interpreter
-        resized_image = image.resize(
-            (scene_input_width, scene_input_height), Image.ANTIALIAS
-        )
+        image = Image.open(io.BytesIO(contents))
+        resized_image = image.resize((scene_input_width, scene_input_height), Image.ANTIALIAS)
         results = classify_image(scene_interpreter, image=resized_image)
-
-        print(
-            f"results[0]: {results[0]}", file=sys.stderr,
-        )
         label_id, prob = results[0]
-
+        data = {}
        data["label"] = scene_labels[label_id]
         data["confidence"] = prob
         data["success"] = True
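
Not part of the commit: a minimal client sketch for exercising the three routes touched in this diff. The endpoint paths and the multipart field name "image" come from the code above; the host, port, and test image path are placeholder assumptions, and the sketch presumes the app is already being served locally (for example with uvicorn).

# Usage sketch, not part of the commit. Assumes the FastAPI app above is
# running locally; SERVER and IMAGE_PATH are placeholders.
import requests

SERVER = "http://localhost:5000"     # assumed host/port
IMAGE_PATH = "test_image.jpg"        # hypothetical test image

with open(IMAGE_PATH, "rb") as f:
    image_bytes = f.read()

# The three routes defined in this file; each expects a multipart field named "image".
for endpoint in ("/v1/vision/face", "/v1/vision/detection", "/v1/vision/scene"):
    response = requests.post(SERVER + endpoint, files={"image": image_bytes})
    # Each route returns a JSON payload; the scene route includes "label",
    # "confidence" and "success" as set in predict_scene above.
    print(endpoint, response.json())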