Commit 43a69ba

rename var file to image
1 parent e55b3aa commit 43a69ba

2 files changed: +21 -53 lines

README.md

Lines changed: 14 additions & 23 deletions
@@ -20,50 +20,41 @@ If you want to create custom models, there is the easy way, and the longer but m
 ## Usage
 Start the tflite-server on port 5000 :
 ```
-(venv) $ uvicorn tflite-server:app --reload --port 5000
+(venv) $ uvicorn tflite-server:app --reload --port 5000 --host 0.0.0.0
 ```
 
 You can check that the tflite-server is running by visiting `http://ip:5000/` from any machine, where `ip` is the ip address of the host (`localhost` if querying from the same machine). The docs can be viewed at `http://localhost:5000/docs`
 
 Post an image to detecting objects via cURL:
 ```
-curl -X POST "http://localhost:5000/v1/vision/detection" -H "accept: application/json" -H "Content-Type: multipart/form-data" -F "file=@tests/people_car.jpg;type=image/jpeg"
+curl -X POST "http://localhost:5000/v1/vision/detection" -H "accept: application/json" -H "Content-Type: multipart/form-data" -F "image=@tests/people_car.jpg;type=image/jpeg"
 ```
 Which should return:
 ```
 {
 "predictions": [
 {
-"confidence": 0.93359375,
-"label": "car",
-"x_max": 619,
-"x_min": 302,
-"y_max": 348,
+"confidence": 0.93359375,
+"label": "car",
+"x_max": 619,
+"x_min": 302,
+"y_max": 348,
 "y_min": 120
-},
+},
 {
-"confidence": 0.7890625,
-"label": "person",
-"x_max": 363,
-"x_min": 275,
-"y_max": 323,
+"confidence": 0.7890625,
+"label": "person",
+"x_max": 363,
+"x_min": 275,
+"y_max": 323,
 "y_min": 126
 },
 .
 .
 .
 'success': True}
 ```
-
-To detect faces:
-```
-curl -X POST "http://localhost:5000/v1/vision/face" -H "accept: application/json" -H "Content-Type: multipart/form-data" -F "file=@tests/faces.jpg;type=image/jpeg"
-```
-
-To detect the scene (dogs vs cats model):
-```
-curl -X POST "http://localhost:5000/v1/vision/scene" -H "accept: application/json" -H "Content-Type: multipart/form-data" -F "file=@tests/cat.jpg;type=image/jpeg"
-```
+An example request using the python requests package is in `tests/live-test.py`
 
 ## Add tflite-server as a service
 You can run tflite-server as a [service](https://www.raspberrypi.org/documentation/linux/usage/systemd.md), which means tflite-server will automatically start on RPi boot, and can be easily started & stopped. Create the service file in the appropriate location on the RPi using: ```sudo nano /etc/systemd/system/tflite-server.service```
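
With the form field renamed from `file` to `image`, a client-side request like the one referenced in `tests/live-test.py` would take roughly the following shape. This is a minimal sketch using the python requests package: the endpoint, port, and test image come from the README diff above, while the rest is illustrative and not the actual contents of `tests/live-test.py`.

```
import requests

# Assumed values, taken from the README above; adjust host/port as needed.
DETECTION_URL = "http://localhost:5000/v1/vision/detection"
IMAGE_PATH = "tests/people_car.jpg"

with open(IMAGE_PATH, "rb") as f:
    # The multipart field must now be named "image" (not "file") to match
    # the renamed UploadFile parameter in tflite-server.py.
    response = requests.post(DETECTION_URL, files={"image": f})

print(response.json())
```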

tflite-server.py

Lines changed: 7 additions & 30 deletions
@@ -2,7 +2,6 @@
 Expose tflite models via a rest API.
 """
 import io
-import logging
 import sys
 
 import numpy as np
@@ -14,14 +13,6 @@
 
 app = FastAPI()
 
-LOGFORMAT = "%(asctime)s %(levelname)s %(name)s : %(message)s"
-logging.basicConfig(
-    # filename="tflite-server.log", # select filename or stream
-    stream=sys.stdout,
-    level=logging.DEBUG,
-    format=LOGFORMAT,
-)
-
 MIN_CONFIDENCE = 0.1  # The absolute lowest confidence for a detection.
 
 FACE_DETECTION_URL = "/v1/vision/face"
@@ -72,14 +63,10 @@ async def info():
 
 
 @app.post(FACE_DETECTION_URL)
-async def predict_face(file: UploadFile = File(...)):
+async def predict_face(image: UploadFile = File(...)):
     data = {"success": False}
-    if file.content_type.startswith("image/") is False:
-        raise HTTPException(
-            status_code=400, detail=f"File '{file.filename}' is not an image."
-        )
     try:
-        contents = await file.read()
+        contents = await image.read()
         image = Image.open(io.BytesIO(contents))  # A PIL image
         image_width = image.size[0]
         image_height = image.size[1]
@@ -92,9 +79,7 @@ async def predict_face(file: UploadFile = File(...)):
         # Process image and get predictions
         face_interpreter.invoke()
         boxes = face_interpreter.get_tensor(face_output_details[0]["index"])[0]
-        classes = face_interpreter.get_tensor(face_output_details[1]["index"])[
-            0
-        ]
+        classes = face_interpreter.get_tensor(face_output_details[1]["index"])[0]
         scores = face_interpreter.get_tensor(face_output_details[2]["index"])[0]
 
         faces = []
@@ -121,14 +106,10 @@ async def predict_face(file: UploadFile = File(...)):
 
 
 @app.post(OBJ_DETECTION_URL)
-async def predict_object(file: UploadFile = File(...)):
+async def predict_object(image: UploadFile = File(...)):
     data = {"success": False}
-    if file.content_type.startswith("image/") is False:
-        raise HTTPException(
-            status_code=400, detail=f"File '{file.filename}' is not an image."
-        )
     try:
-        contents = await file.read()
+        contents = await image.read()
         image = Image.open(io.BytesIO(contents))  # A PIL image
         image_width = image.size[0]
         image_height = image.size[1]
@@ -167,14 +148,10 @@ async def predict_object(file: UploadFile = File(...)):
 
 
 @app.post(SCENE_URL)
-async def predict_scene(file: UploadFile = File(...)):
+async def predict_scene(image: UploadFile = File(...)):
     data = {"success": False}
-    if file.content_type.startswith("image/") is False:
-        raise HTTPException(
-            status_code=400, detail=f"File '{file.filename}' is not an image."
-        )
     try:
-        contents = await file.read()
+        contents = await image.read()
         image = Image.open(io.BytesIO(contents))  # A PIL image
         # Format data and send to interpreter
         resized_image = image.resize(
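
In FastAPI the name of an `UploadFile` parameter also determines the multipart field name it expects, so after this rename clients must upload under `image` rather than `file` (hence the matching cURL change in the README). The following is a stripped-down sketch of the pattern, not the full server; the response fields and the `pil_image` name are illustrative only.

```
import io

from fastapi import FastAPI, File, UploadFile
from PIL import Image

app = FastAPI()


@app.post("/v1/vision/detection")
async def predict_object(image: UploadFile = File(...)):
    # "image" is both the parameter name and the multipart field name
    # that clients must use when posting the file.
    data = {"success": False}
    try:
        contents = await image.read()
        pil_image = Image.open(io.BytesIO(contents))  # A PIL image
        data["image_width"], data["image_height"] = pil_image.size
        data["success"] = True
    except Exception:
        pass
    return data
```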
