-
Notifications
You must be signed in to change notification settings - Fork 0
/
infer_class.py
executable file
·44 lines (39 loc) · 1.49 KB
/
infer_class.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
import os

import tensorflow as tf
from tensorflow import keras

from src.model import ModelMaker
from src.metrics import precision_m, recall_m, f1_m
# Class method implementation for Multiple Deployment strategies
class InferModel:
    """Load a trained model for inference via multiple deployment strategies.

    Currently implemented: direct model load from disk, either a Keras
    HDF5 file (``.h5``/``.hdf5``) or a TensorFlow SavedModel directory.
    Placeholders remain for TensorFlow Serving, Triton, and TF-JS.
    """

    def __init__(self, model_path, model_name):
        """Store the model location and the custom metrics needed to load it.

        Args:
            model_path: Path to an HDF5 file or a SavedModel directory.
            model_name: Human-readable name of the model (kept for callers;
                not used by direct_model_load itself).
        """
        # NOTE: original used a backslash continuation inside this string,
        # which embedded raw indentation whitespace; fixed via concatenation.
        self.description = (
            "Class method implementation for Multiple Deployment Strategies "
            "Current Supported - Direct Model Load, Tensorflow Serving"
        )
        self.model_path = model_path
        self.model_name = model_name
        self.model_maker = ModelMaker()
        # Custom metrics must be supplied to Keras so deserialization of a
        # model compiled with them does not fail.
        self.custom_objects = {
            "precision_m": precision_m,
            "recall_m": recall_m,
            "f1_m": f1_m,
        }

    def direct_model_load(self):
        """Load the model directly from ``self.model_path``.

        Returns:
            The loaded Keras model.

        Raises:
            Exception: if the path is neither an HDF5 file nor a
                SavedModel directory.
        """
        if self.model_path.split(".")[-1] in ("h5", "hdf5"):
            model = keras.models.load_model(
                self.model_path, custom_objects=self.custom_objects
            )
            # summary() prints itself and returns None; don't print the None.
            model.summary()
            return model
        elif os.path.isdir(self.model_path):
            # BUG FIX: tf.saved_model.load() accepts no custom_objects kwarg
            # (TypeError) and returns an object without .summary().
            # keras.models.load_model handles SavedModel directories and
            # custom metrics correctly.
            model = keras.models.load_model(
                self.model_path, custom_objects=self.custom_objects
            )
            model.summary()
            return model
        else:
            raise Exception(
                "Could not load the model- No model present in the given "
                "path in acceptable format Please check and rerun the code"
            )

    # TODO: Tensorflow Serving
    # TODO: Triton Serving
    # TODO: Web App - TF JS