
clean code for inference #64


Merged · 1 commit · Mar 16, 2023
2 changes: 1 addition & 1 deletion deploy/models_utils/conversion/onnx2om.sh
@@ -13,6 +13,6 @@ python auto_gear.py --image_path=/xx/lsvt/images \
--rec_onnx_path=ch_ppocr_server_v2.0_rec_infer_argmax.onnx \
--rec_model_height=32 \
--soc_version=Ascend310P \
--output_path=./lsvt_om_v2n &&
--output_path=./lsvt_om_v2 &&
python auto_select.py --rec_model_path=lsvt_om_v2/crnn
popd
67 changes: 45 additions & 22 deletions deploy/mx_infer/args.py
@@ -1,16 +1,20 @@
import argparse
import os
import itertools
import os

from deploy.mx_infer.utils import log
from deploy.mx_infer.framework.module_data_type import InferModelComb
from deploy.mx_infer.processors import SUPPORT_DET_MODEL, SUPPORT_REC_MODEL
from deploy.mx_infer.utils import log


def str2bool(v):
return v.lower() in ("true", "t", "1")
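
Note: `str2bool` is used as the argparse `type` converter for boolean flags such as `--save_pipeline_crop_res`, so textual values parse to real booleans. A minimal usage sketch (the standalone parser below is only for illustration):

```python
import argparse

def str2bool(v):
    # "true", "t", "1" (any case) -> True; everything else -> False
    return v.lower() in ("true", "t", "1")

parser = argparse.ArgumentParser()
parser.add_argument('--save_pipeline_crop_res', type=str2bool, default=False)

print(parser.parse_args(['--save_pipeline_crop_res', 'True']).save_pipeline_crop_res)  # True
print(parser.parse_args(['--save_pipeline_crop_res', 'no']).save_pipeline_crop_res)    # False
```
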


def get_args():
"""
command line parameters for inference
"""
parser = argparse.ArgumentParser(description='Arguments for inference.')
parser.add_argument('--input_images_dir', type=str, required=True,
help='Input images dir for inference, can be dir containing multiple images or path of single '
@@ -38,7 +42,7 @@ def get_args():
help='Saving dir for visualization of detection results.')
parser.add_argument('--vis_pipeline_save_dir', type=str, required=False,
help='Saving dir for visualization of pipeline inference results.')
parser.add_argument('--vis_font_path', type=str, default='', required=False,
parser.add_argument('--vis_font_path', type=str, required=False,
help='Font file path for recognition model.')
parser.add_argument('--save_pipeline_crop_res', type=str2bool, default=False, required=False,
help='Whether save the images cropped during pipeline.')
@@ -57,13 +61,27 @@ def get_args():


def setup_logger(args):
"""
initialize log system
"""
log.init_logger(args.show_log, args.save_log_dir)
log.save_args(args)


def update_task_args(args):
det = os.path.exists(args.det_model_path) if isinstance(args.det_model_path, str) else False
cls = os.path.exists(args.cls_model_path) if isinstance(args.cls_model_path, str) else False
rec = os.path.exists(args.rec_model_path) if isinstance(args.rec_model_path, str) else False
"""
add internal parameters according to different task type
"""
if args.det_model_path and not os.path.exists(args.det_model_path):
raise ValueError(f"The det_model_path of '{args.det_model_path}' does not exist.")
if args.cls_model_path and not os.path.exists(args.cls_model_path):
raise ValueError(f"The cls_model_path of '{args.cls_model_path}' does not exist.")
if args.rec_model_path and not os.path.exists(args.rec_model_path):
raise ValueError(f"The rec_model_path of '{args.rec_model_path}' does not exist.")

det = bool(args.det_model_path)
cls = bool(args.cls_model_path)
rec = bool(args.rec_model_path)

task_map = {
(True, False, False): InferModelComb.DET,
@@ -74,26 +92,30 @@ def update_task_args(args):

task_order = (det, cls, rec)
if task_order in task_map:
taks_type = task_map[task_order]
setattr(args, 'task_type', taks_type)
task_type = task_map[task_order]
setattr(args, 'task_type', task_type)
setattr(args, 'save_vis_det_save_dir', bool(args.vis_det_save_dir))
setattr(args, 'save_vis_pipeline_save_dir', bool(args.vis_pipeline_save_dir))
else:
if not (det or cls or rec):
raise ValueError(f"det_model_path, cls_model_path, rec_model_path cannot be empty at the same time.")
elif det:
raise ValueError(f"rec_model_path can't be empty when det_model_path and cls_model_path are not empty.")
else:
raise ValueError(f"cls_model_path{args.cls_model_path} model does not support inference independently.")
unsupported_task_map = {
(False, False, False): "empty",
(True, True, False): "det+cls",
(False, True, False): "cls",
(False, True, True): "cls+rec"
}

setattr(args, 'save_vis_det_save_dir', True if args.vis_det_save_dir else False)
setattr(args, 'save_vis_pipeline_save_dir', True if args.vis_pipeline_save_dir else False)
raise ValueError(
f"Only support det, rec, det+rec and det+cls+rec, but got {unsupported_task_map[task_order]}. "
f"Please check model_path!")

return args


def check_args(args):
if not args.input_images_dir or \
(not os.path.isfile(args.input_images_dir) and not os.path.isdir(args.input_images_dir)) or \
(os.path.isdir(args.input_images_dir) and len(os.listdir(args.input_images_dir)) == 0):
"""
check parameters
"""
if not args.input_images_dir or not os.path.exists(args.input_images_dir):
raise ValueError(f"input_images_dir must be dir containing multiple images or path of single image.")

if args.det_model_path and not os.path.isfile(args.det_model_path):
Expand All @@ -102,8 +124,9 @@ def check_args(args):
if args.cls_model_path and not os.path.isfile(args.cls_model_path):
raise ValueError(f"cls_model_path must be a model file path for classification.")

if args.rec_model_path and (os.path.isdir(args.rec_model_path) and len(os.listdir(args.rec_model_path)) == 0):
raise ValueError(f"rec_model_path must a model file or dir containing model file for recognition model.")
if args.rec_model_path and (not os.path.exists(args.rec_model_path) or (
os.path.isdir(args.rec_model_path) and not os.listdir(args.rec_model_path))):
raise ValueError(f"rec_model_path must be a model file or dir containing model file for recognition model.")

if args.rec_model_path and (not args.rec_char_dict_path or not os.path.isfile(args.rec_char_dict_path)):
raise ValueError(
@@ -127,7 +150,7 @@ def check_args(args):
f"for single detection task.")

if not args.res_save_dir:
raise ValueError(f"res_save_dir cant be empty.")
raise ValueError(f"res_save_dir can't be empty.")

if args.det_algorithm not in SUPPORT_DET_MODEL:
raise ValueError(f"det_algorithm only support {SUPPORT_DET_MODEL}, but got {args.det_algorithm}.")
5 changes: 2 additions & 3 deletions deploy/mx_infer/framework/module_base.py
@@ -3,10 +3,9 @@
from ctypes import c_longdouble
from multiprocessing import Manager

from .module_data_type import ModuleInitArgs

from deploy.mx_infer.data_type import ProfilingData
from deploy.mx_infer.utils import log
from .module_data_type import ModuleInitArgs


class ModuleBase(object):
@@ -68,7 +67,7 @@ def process(self, input_data):
pass

@abstractmethod
def init_self_args(self, ):
def init_self_args(self):
self.msg_queue.put(f'{self.__class__.__name__} instance id {self.instance_id} init complete')
log.info(f'{self.__class__.__name__} instance id {self.instance_id} init complete')

2 changes: 1 addition & 1 deletion deploy/mx_infer/framework/module_manager.py
@@ -1,9 +1,9 @@
from collections import defaultdict, namedtuple
from multiprocessing import Queue, Process

from .module_data_type import ModulesInfo, ModuleInitArgs
from deploy.mx_infer.processors import processor_initiator
from deploy.mx_infer.utils import log
from .module_data_type import ModulesInfo, ModuleInitArgs

OutputRegisterInfo = namedtuple('OutputRegisterInfo', ['pipeline_name', 'module_send', 'module_recv'])

16 changes: 14 additions & 2 deletions deploy/mx_infer/pipeline.py
@@ -2,14 +2,19 @@
import time
from collections import defaultdict
from multiprocessing import Process, Queue

import tqdm

from deploy.mx_infer.data_type import StopSign
from deploy.mx_infer.framework import ModuleDesc, ModuleConnectDesc, ModuleManager, SupportedTaskOrder
from deploy.mx_infer.processors import MODEL_DICT
from deploy.mx_infer.utils import log, profiling, safe_div, save_path_init, TASK_QUEUE_SIZE


def image_sender(images_path, send_queue, show_progressbar):
"""
send image to input queue for pipeline
"""
if os.path.isdir(images_path):
input_image_list = [os.path.join(images_path, path) for path in os.listdir(images_path)]
if show_progressbar:
@@ -23,9 +28,12 @@ def image_sender(images_path, send_queue, show_progressbar):


def build_pipeline_kernel(args, input_queue):
"""
build and register pipeline
"""
task_type = args.task_type
parallel_num = args.parallel_num
module_desc_list = [ModuleDesc('HandoutProcess', 1), ModuleDesc('DecodeProcess', parallel_num), ]
module_desc_list = [ModuleDesc('HandoutProcess', 1), ModuleDesc('DecodeProcess', parallel_num)]

module_order = SupportedTaskOrder[task_type]

@@ -96,11 +104,15 @@ def build_pipeline(args):
if args.save_log_dir:
save_path_init(args.save_log_dir, exist_ok=True)

if os.path.isdir(args.input_images_dir) and not os.listdir(args.input_images_dir):
log.warning(f"The input_images_dir directory '{args.input_images_dir}' is empty, no image to process.")
return

task_queue = Queue(TASK_QUEUE_SIZE)
process = Process(target=build_pipeline_kernel, args=(args, task_queue))
process.start()
image_sender(images_path=args.input_images_dir, send_queue=task_queue,
show_progressbar=False if args.show_log else True)
show_progressbar=not args.show_log)
task_queue.put(StopSign(), block=True)
process.join()
process.close()
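
Note: `build_pipeline` now also warns and returns early when `input_images_dir` is an empty directory; otherwise it feeds paths into a bounded `multiprocessing.Queue`, runs the module pipeline in a separate process, and terminates it with a `StopSign` sentinel. A minimal, self-contained sketch of that producer/consumer hand-off, with stand-ins for `StopSign` and the real pipeline:

```python
import os
from multiprocessing import Process, Queue

class StopSign:
    """Stand-in for deploy.mx_infer.data_type.StopSign (end-of-input sentinel)."""

def worker(queue):
    # consumer: drain the queue until the sentinel arrives
    while True:
        item = queue.get(block=True)
        if isinstance(item, StopSign):
            break
        print(f"processing {item}")

def send_images(images_dir, queue):
    # producer: one queue entry per file in the directory
    for name in os.listdir(images_dir):
        queue.put(os.path.join(images_dir, name), block=True)

if __name__ == "__main__":
    task_queue = Queue(32)  # bounded, like TASK_QUEUE_SIZE
    proc = Process(target=worker, args=(task_queue,))
    proc.start()
    send_images(".", task_queue)
    task_queue.put(StopSign(), block=True)
    proc.join()
    proc.close()
```
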
3 changes: 1 addition & 2 deletions deploy/mx_infer/processors/__init__.py
@@ -1,8 +1,7 @@
from sys import modules

from deploy.mx_infer.utils import log
from deploy.mx_infer.framework import InferModelComb

from deploy.mx_infer.utils import log
from .classification import CLSPreProcess, CLSInferProcess
from .common import HandoutProcess, CollectProcess, DecodeProcess
from .detection import DetPreProcess, DetInferProcess, DetPostProcess, SUPPORT_DET_MODEL
1 change: 0 additions & 1 deletion deploy/mx_infer/processors/common/collect_process.py
@@ -9,7 +9,6 @@
from deploy.mx_infer.data_type import StopData, ProcessData, ProfilingData
from deploy.mx_infer.framework import ModuleBase, InferModelComb
from deploy.mx_infer.utils import safe_list_writer, log

from tools.utils.visualize import VisMode, Visualization

_RESULTS_SAVE_FILENAME = {
4 changes: 2 additions & 2 deletions deploy/mx_infer/processors/detection/det_infer_process.py
@@ -26,7 +26,7 @@ def init_self_args(self):
if desc != "dynamic_height_width":
raise ValueError("model input shape must be dynamic image_size with gear.")

batchsize, channel, hw_list = shape_info
_, channel, hw_list = shape_info
self.gear_list = hw_list
self.model_channel = channel
self.max_dot_gear = max([(h, w) for h, w in hw_list], key=lambda x: x[0] * x[1])
@@ -45,7 +45,7 @@ def process(self, input_data):
self.send_to_next_module(input_data)
return
input_array = input_data.input_array
n, c, h, w = input_array.shape
_, _, h, w = input_array.shape

matched_gear = get_matched_gear_hw((h, w), self.gear_list, self.max_dot_gear)
input_array = padding_with_np(input_array, matched_gear)
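
Note: with the unused batch/channel variables replaced by `_`, the remaining flow is unchanged: read the model's dynamic `(h, w)` gear list at init, pick a gear for the incoming array, and pad up to it. A small sketch of that idea under stated assumptions — the gear values and the "smallest gear that fits, else the largest" rule are illustrative, not taken from `get_matched_gear_hw`:

```python
import numpy as np

def match_gear(hw, gear_list, max_dot_gear):
    # assumption: choose the smallest gear able to contain (h, w), else the largest gear
    h, w = hw
    candidates = [(gh, gw) for gh, gw in gear_list if gh >= h and gw >= w]
    if not candidates:
        return max_dot_gear
    return min(candidates, key=lambda g: g[0] * g[1])

def pad_to_gear(x, gear):
    # zero-pad an (n, c, h, w) array up to the gear's height/width
    n, c, h, w = x.shape
    gh, gw = gear
    out = np.zeros((n, c, gh, gw), dtype=x.dtype)
    out[:, :, :h, :w] = x
    return out

gears = [(736, 736), (736, 1280), (1280, 1280)]          # illustrative gear list
x = np.zeros((1, 3, 700, 900), dtype=np.float32)
gear = match_gear(x.shape[2:], gears, max(gears, key=lambda g: g[0] * g[1]))
print(gear, pad_to_gear(x, gear).shape)                  # (736, 1280) (1, 3, 736, 1280)
```
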
2 changes: 1 addition & 1 deletion deploy/mx_infer/processors/detection/det_post_process.py
@@ -36,7 +36,7 @@ def get_boxes_from_maps(self, pred: np.ndarray, binary_map: np.ndarray, dest_wid
outs = cv2.findContours((binary_map * 255).astype(np.uint8), cv2.RETR_LIST,
cv2.CHAIN_APPROX_SIMPLE)
if len(outs) == 3:
img, contours, _ = outs[0], outs[1], outs[2]
_, contours, _ = outs[0], outs[1], outs[2]
elif len(outs) == 2:
contours, _ = outs[0], outs[1]

2 changes: 1 addition & 1 deletion deploy/mx_infer/processors/recognition/rec_post_process.py
@@ -2,8 +2,8 @@

import numpy as np

from deploy.mx_infer.utils import array_to_texts, file_base_check, log
from deploy.mx_infer.framework import ModuleBase, InferModelComb
from deploy.mx_infer.utils import array_to_texts, file_base_check, log


class RecPostProcess(ModuleBase):
19 changes: 5 additions & 14 deletions deploy/mx_infer/utils/cv_utils.py
@@ -93,7 +93,7 @@ def expand(input_images):
"""
if the type of input_images is numpy array,expand the first axis
if the type of input_images is list, convert the list to numpy array
:param input_images:
:param input_images: input image
:return: the numpy array of shape (batchsize,channel,height,width)
"""
if isinstance(input_images, np.ndarray):
@@ -107,9 +107,6 @@ def expand(input_images):
def unclip(box: np.ndarray, unclip_ratio: float):
"""
expand the box by unclip ratio
:param box:
:param unclip_ratio:
:return:
"""
poly = Polygon(box)
distance = safe_div(poly.area * unclip_ratio, poly.length)
@@ -122,12 +119,6 @@ def unclip(box: np.ndarray, unclip_ratio: float):
def construct_box(box: np.ndarray, height: int, width: int, dest_height: int, dest_width: int):
"""
resize the box to the original size.
:param box:
:param height:
:param width:
:param dest_height:
:param dest_width:
:return:
"""
try:
box[:, 0] = np.clip(
@@ -175,7 +166,7 @@ def box_score_fast(shrink_map: np.ndarray, input_box: np.ndarray):
using box mean score as the mean score
:param shrink_map: the output feature map of DBNet
:param input_box: the min boxes
:return:
:return:
"""
height, width = shrink_map.shape[:2]
box = input_box.copy()
Expand All @@ -196,7 +187,7 @@ def box_score_slow(shrink_map: np.ndarray, contour: np.ndarray):
using polyon mean score as the mean score
:param shrink_map: the output feature map of DBNet
:param contour: the contours
:return:
:return:
"""
height, width = shrink_map.shape
contour = contour.copy()
@@ -293,14 +284,14 @@ def get_shape_info(shape: list, gears: list, nchw=True):
else:
batchsize, height, width, channel = shape

if channel != 1 and channel != 3:
if channel not in (1, 3):
raise ValueError("model channel number must be 1 or 3")

# static shape or dynamic shape without gear
if -1 not in shape:
return "static_shape", (batchsize, channel, height, width)

if len(gears) == 0:
if not gears:
return "dynamic_shape", (batchsize, channel, height, width)

# dynamic shape with gear
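
Note: the visible part of `get_shape_info` classifies a model input shape as static (no `-1` dimensions), dynamic without gear (has `-1`, empty gear list), or dynamic with gear; the gear branch itself is cut off in this diff. A standalone sketch of that classification — the `"dynamic_height_width"` label and the shape of the returned tuple in the gear branch are inferred from the check in det_infer_process.py above, not shown here:

```python
def classify_shape(shape, gears, nchw=True):
    if nchw:
        batchsize, channel, height, width = shape
    else:
        batchsize, height, width, channel = shape

    if channel not in (1, 3):
        raise ValueError("model channel number must be 1 or 3")

    if -1 not in shape:                  # static shape
        return "static_shape", (batchsize, channel, height, width)
    if not gears:                        # dynamic shape without gear
        return "dynamic_shape", (batchsize, channel, height, width)
    # dynamic shape with gear (label and tuple layout assumed for illustration)
    return "dynamic_height_width", (batchsize, channel, gears)

print(classify_shape([1, 3, 736, 1280], []))           # ('static_shape', ...)
print(classify_shape([1, 3, -1, -1], []))              # ('dynamic_shape', ...)
print(classify_shape([1, 3, -1, -1], [(736, 1280)]))   # ('dynamic_height_width', ...)
```
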
4 changes: 1 addition & 3 deletions deploy/mx_infer/utils/logger.py
@@ -177,11 +177,9 @@ def save_args(self, args):
else:
logging.error('This api just support argparse or dict, please check your input type.')
raise Exception('This api just support argparse or dict, please check your input type.')
self.debug('Args:')
self.info('Args:')
args_copy = args.copy()
for key, value in args_copy.items():
if isinstance(value, str) and "/" in value:
args_copy[key] = ''
self.info('--> %s: %s', key, self.filter_log_str(args_copy[key]))
self.info('Finish read param')

3 changes: 1 addition & 2 deletions deploy/mx_infer/utils/safe_utils.py
@@ -126,6 +126,5 @@ def save_path_init(path, exist_ok=False):
if os.path.exists(path):
if exist_ok:
return
else:
shutil.rmtree(path)
shutil.rmtree(path)
os.makedirs(path, 0o750)
4 changes: 0 additions & 4 deletions tools/utils/visualize.py
@@ -38,18 +38,14 @@ def vis_bbox(self, image, box_list, color, thickness):

def vis_bbox_text(self, image, box_list, text_list, color, thickness, font_path):
image_bbox = self.vis_bbox(image, box_list, color, thickness)
# image_text = Image.new('RGB', (image_bbox.width, image_bbox.height), (255, 255, 255))
image_text = image_bbox.copy()
# image_text = np.zeros((image_bbox.shape[0], image_bbox.shape[1], 3), np.uint8)
image_text.fill(255)
image_text = self.vis_bbox(image_text, box_list, color, thickness)

# image_bbox = Image.fromarray(cv2.cvtColor(image_bbox, cv2.COLOR_BGR2RGB))
image_text = Image.fromarray(image_text)
draw_text = ImageDraw.Draw(image_text)
font = ImageFont.truetype(font_path, 20, encoding='utf-8')
for i, text in enumerate(text_list):
# draw_text.polygon(box_list[i], fill='blue', outline='blue')
draw_text.text(box_list[i][0], text, color, font)
image_concat = np.concatenate([np.array(image_bbox), np.array(image_text)], axis=1)
return image_concat
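
Note: with the commented-out alternatives removed, `vis_bbox_text` keeps a single strategy: draw the boxes on the original image, draw boxes plus recognized text on a white copy, and concatenate the two side by side. A simplified sketch of that layout; cv2 drawing and the default PIL font stand in for `vis_bbox` and the font configured via `--vis_font_path`:

```python
import cv2
import numpy as np
from PIL import Image, ImageDraw, ImageFont

image = np.full((200, 300, 3), 160, dtype=np.uint8)                 # dummy input image
box = np.array([[40, 40], [260, 40], [260, 120], [40, 120]], dtype=np.int32)

image_bbox = cv2.polylines(image.copy(), [box], True, (0, 0, 255), 2)

image_text = image_bbox.copy()
image_text.fill(255)                                                # white canvas
image_text = cv2.polylines(image_text, [box], True, (0, 0, 255), 2)

pil_text = Image.fromarray(image_text)
draw = ImageDraw.Draw(pil_text)
draw.text((int(box[0][0]), int(box[0][1])), "hello", fill=(255, 0, 0),
          font=ImageFont.load_default())

image_concat = np.concatenate([image_bbox, np.array(pil_text)], axis=1)
print(image_concat.shape)  # (200, 600, 3)
```
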