From 693263ff6d638ee39e68d135f2a5bb94cf1797f4 Mon Sep 17 00:00:00 2001
From: bryson-davis
Date: Sun, 10 Jan 2021 18:40:36 +0800
Subject: [PATCH 1/2] Rename hard sample to hard example

---
 examples/helmet_detection_inference/README.md |  4 +--
 .../little_model/little_model.py              | 30 ++++++++--------
 .../joint_inference/joint_inference.py        | 36 ++++++++++---------
 3 files changed, 37 insertions(+), 33 deletions(-)

diff --git a/examples/helmet_detection_inference/README.md b/examples/helmet_detection_inference/README.md
index 7fd6ff4..722551e 100644
--- a/examples/helmet_detection_inference/README.md
+++ b/examples/helmet_detection_inference/README.md
@@ -72,7 +72,7 @@ EOF
 Note the setting of the following parameters, which must be the same as in the script [little_model.py](/examples/helmet_detection_inference/little_model/little_model.py):
 - hardExampleMining: set the hard example mining algorithm, chosen from {IBT, CrossEntropy}, for inference on the edge side.
 - video_url: set the URL of the video stream.
-- all_sample_inference_output: set your output path for the inference results, and note that the root path has to be /home/data.
+- all_examples_inference_output: set your output path for the inference results, and note that the root path has to be /home/data.
 - hard_example_edge_inference_output: set your output path for the results of inferring hard examples on the edge side.
 - hard_example_cloud_inference_output: set your output path for the results of inferring hard examples on the cloud side.
 
@@ -105,7 +105,7 @@ spec:
         value: "416,736"
       - key: "video_url"
         value: "rtsp://localhost/video"
-      - key: "all_sample_inference_output"
+      - key: "all_examples_inference_output"
         value: "/home/data/output"
       - key: "hard_example_cloud_inference_output"
         value: "/home/data/hard_example_cloud_inference_output"
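For reference, each "key" entry in the CRD spec above is read back under the
same name by the worker script through neptune.context.get_parameters, which
is why the README keys and the script must agree. A minimal sketch, using
only calls and key names that appear in this patch (the three local variable
names follow little_model.py):

    import neptune

    # Keys must match the CRD spec entries above; the renamed key is
    # 'all_examples_inference_output'.
    all_output_path = neptune.context.get_parameters(
        'all_examples_inference_output'
    )
    cloud_output_path = neptune.context.get_parameters(
        'hard_example_cloud_inference_output'
    )
    edge_output_path = neptune.context.get_parameters(
        'hard_example_edge_inference_output'
    )
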
inference_result.hard_sample_edge_result, + inference_result.hard_example_edge_result, colors="green,blue,yellow,red", text_thickness=None, box_thickness=None) - cv2.imwrite(f"{hard_sample_edge_output_path}/{nframe}.jpeg", + cv2.imwrite(f"{hard_example_edge_output_path}/{nframe}.jpeg", edge_collaboration_frame) @@ -163,8 +165,8 @@ def run(): camera_address = neptune.context.get_parameters('video_url') mkdir(all_output_path) - mkdir(hard_sample_edge_output_path) - mkdir(hard_sample_cloud_output_path) + mkdir(hard_example_edge_output_path) + mkdir(hard_example_cloud_output_path) # create little model object model = neptune.joint_inference.TSLittleModel( @@ -174,7 +176,7 @@ def run(): create_input_feed=create_input_feed, create_output_fetch=create_output_fetch ) - # create hard sample algorithm + # create hard example algorithm threshold_box = float(neptune.context.get_hem_parameters( "threshold_box", 0.5 )) diff --git a/lib/neptune/joint_inference/joint_inference.py b/lib/neptune/joint_inference/joint_inference.py index fb79f7d..183ae88 100644 --- a/lib/neptune/joint_inference/joint_inference.py +++ b/lib/neptune/joint_inference/joint_inference.py @@ -253,7 +253,7 @@ def run(self): info.inferenceNumber = self.inference_number info.hardExampleNumber = self.hard_example_number info.uploadCloudRatio = ( - self.hard_example_number / self.inference_number + self.hard_example_number / self.inference_number ) message = { "name": BaseConfig.worker_name, @@ -271,28 +271,29 @@ def run(self): class InferenceResult: """The Result class for joint inference - :param is_hard_sample: `True` means a hard sample, `False` means not a hard - sample + :param is_hard_example: `True` means a hard example, `False` means not a + hard example :param final_result: the final inference result - :param hard_sample_edge_result: the edge little model inference result of - hard sample - :param hard_sample_cloud_result: the cloud big model inference result of - hard sample + :param hard_example_edge_result: the edge little model inference result of + hard example + :param hard_example_cloud_result: the cloud big model inference result of + hard example """ - def __init__(self, is_hard_sample, final_result, - hard_sample_edge_result, hard_sample_cloud_result): - self.is_hard_sample = is_hard_sample + def __init__(self, is_hard_example, final_result, + hard_example_edge_result, hard_example_cloud_result): + self.is_hard_example = is_hard_example self.final_result = final_result - self.hard_sample_edge_result = hard_sample_edge_result - self.hard_sample_cloud_result = hard_sample_cloud_result + self.hard_example_edge_result = hard_example_edge_result + self.hard_example_cloud_result = hard_example_cloud_result class JointInference: """Class provided for external systems for model joint inference. 
diff --git a/lib/neptune/joint_inference/joint_inference.py b/lib/neptune/joint_inference/joint_inference.py
index fb79f7d..183ae88 100644
--- a/lib/neptune/joint_inference/joint_inference.py
+++ b/lib/neptune/joint_inference/joint_inference.py
@@ -253,7 +253,7 @@ def run(self):
                 info.inferenceNumber = self.inference_number
                 info.hardExampleNumber = self.hard_example_number
                 info.uploadCloudRatio = (
-                    self.hard_example_number / self.inference_number
+                        self.hard_example_number / self.inference_number
                 )
                 message = {
                     "name": BaseConfig.worker_name,
@@ -271,28 +271,29 @@
 class InferenceResult:
     """The Result class for joint inference
 
-    :param is_hard_sample: `True` means a hard sample, `False` means not a hard
-        sample
+    :param is_hard_example: `True` means a hard example, `False` means not a
+        hard example
     :param final_result: the final inference result
-    :param hard_sample_edge_result: the edge little model inference result of
-        hard sample
-    :param hard_sample_cloud_result: the cloud big model inference result of
-        hard sample
+    :param hard_example_edge_result: the edge little model inference result of
+        hard example
+    :param hard_example_cloud_result: the cloud big model inference result of
+        hard example
     """
 
-    def __init__(self, is_hard_sample, final_result,
-                 hard_sample_edge_result, hard_sample_cloud_result):
-        self.is_hard_sample = is_hard_sample
+    def __init__(self, is_hard_example, final_result,
+                 hard_example_edge_result, hard_example_cloud_result):
+        self.is_hard_example = is_hard_example
         self.final_result = final_result
-        self.hard_sample_edge_result = hard_sample_edge_result
-        self.hard_sample_cloud_result = hard_sample_cloud_result
+        self.hard_example_edge_result = hard_example_edge_result
+        self.hard_example_cloud_result = hard_example_cloud_result
 
 
 class JointInference:
     """Class provided for external systems for model joint inference.
 
     :param little_model: the little model entity for edge inference
-    :param hard_example_mining_algorithm: the algorithm for judging hard sample
+    :param hard_example_mining_algorithm: the algorithm for judging hard
+        example
     :param pre_hook: the pre function of edge inference
     :param post_hook: the post function of edge inference
     """
@@ -327,7 +328,7 @@ def __init__(self, little_model: BaseModel,
         else:
             hard_example_mining_algorithm = ThresholdFilter()
 
-        self.cloud_offload_algorithm = hard_example_mining_algorithm
+        self.hard_example_mining_algorithm = hard_example_mining_algorithm
         self.pre_hook = pre_hook
         self.post_hook = post_hook
 
@@ -343,9 +344,10 @@ def inference(self, img_data) -> InferenceResult:
         edge_result = self.little_model.inference(img_data_pre)
         if self.post_hook:
             edge_result = self.post_hook(edge_result)
-        is_hard_sample = self.cloud_offload_algorithm.hard_judge(edge_result)
-        if not is_hard_sample:
-            LOG.debug("not hard sample, use edge result directly")
+        is_hard_example = self.hard_example_mining_algorithm.hard_judge(
+            edge_result)
+        if not is_hard_example:
+            LOG.debug("not hard example, use edge result directly")
             self.lc_reporter.update_for_edge_inference()
             return InferenceResult(False, edge_result, None, None)
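After this rename, callers branch on the new InferenceResult field names, as
output_deal in little_model.py does above. A minimal consumer sketch
(handle_result is a hypothetical helper; the fields are the ones defined in
this patch):

    def handle_result(inference_result):
        # Easy frames are finished at the edge; final_result holds the
        # edge output.
        if not inference_result.is_hard_example:
            return inference_result.final_result
        # Hard examples may carry both results; prefer the cloud big
        # model's answer when it replied.
        if inference_result.hard_example_cloud_result is not None:
            return inference_result.hard_example_cloud_result
        return inference_result.hard_example_edge_result
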
From 4925635bbf29a8430b1bd8d68803c0b7c21f295f Mon Sep 17 00:00:00 2001
From: bryson-davis
Date: Sun, 10 Jan 2021 18:47:49 +0800
Subject: [PATCH 2/2] Clean pre_hook/post_hook of JointInference lib code

---
 .../little_model/little_model.py              | 10 ++++------
 .../joint_inference/joint_inference.py        | 19 +++++++------------
 2 files changed, 11 insertions(+), 18 deletions(-)

diff --git a/examples/helmet_detection_inference/little_model/little_model.py b/examples/helmet_detection_inference/little_model/little_model.py
index 5558c39..ea63e0e 100644
--- a/examples/helmet_detection_inference/little_model/little_model.py
+++ b/examples/helmet_detection_inference/little_model/little_model.py
@@ -82,7 +82,7 @@ def preprocess(image, input_shape):
     new_image.fill(128)
     bh, bw, _ = new_image.shape
     new_image[int((bh - nh) / 2):(nh + int((bh - nh) / 2)),
-                  int((bw - nw) / 2):(nw + int((bw - nw) / 2)), :] = image
+              int((bw - nw) / 2):(nw + int((bw - nw) / 2)), :] = image
     new_image /= 255.
     new_image = np.expand_dims(new_image, 0)  # Add batch dimension.
@@ -112,7 +112,7 @@ def create_output_fetch(sess):
     return output_fetch
 
 
-def post_hook(model_output):
+def postprocess(model_output):
     all_classes, all_scores, all_bboxes = model_output
     bboxes = []
     for c, s, bbox in zip(all_classes, all_scores, all_bboxes):
@@ -171,7 +171,7 @@ def run():
     # create little model object
     model = neptune.joint_inference.TSLittleModel(
         preprocess=preprocess,
-        postprocess=None,
+        postprocess=postprocess,
         input_shape=input_shape,
         create_input_feed=create_input_feed,
         create_output_fetch=create_output_fetch
@@ -188,9 +188,7 @@ def run():
     # create joint inference object
     inference_instance = neptune.joint_inference.JointInference(
         little_model=model,
-        hard_example_mining_algorithm=hard_example_mining_algorithm,
-        pre_hook=None,
-        post_hook=post_hook,
+        hard_example_mining_algorithm=hard_example_mining_algorithm
     )
 
     # use video streams for testing
diff --git a/lib/neptune/joint_inference/joint_inference.py b/lib/neptune/joint_inference/joint_inference.py
index 183ae88..15b64d1 100644
--- a/lib/neptune/joint_inference/joint_inference.py
+++ b/lib/neptune/joint_inference/joint_inference.py
@@ -203,7 +203,10 @@ def inference(self, img_data):
         input_feed = self.create_input_feed(self.session, new_image,
                                             img_data_np)
         output_fetch = self.create_output_fetch(self.session)
-        return self.session.run(output_fetch, input_feed)
+        output = self.session.run(output_fetch, input_feed)
+        if self.postprocess:
+            output = self.postprocess(output)
+        return output
 
 
 class LCReporter(threading.Thread):
@@ -294,13 +297,10 @@ class JointInference:
     :param little_model: the little model entity for edge inference
     :param hard_example_mining_algorithm: the algorithm for judging hard
         example
-    :param pre_hook: the pre function of edge inference
-    :param post_hook: the post function of edge inference
     """
 
     def __init__(self, little_model: BaseModel,
-                 hard_example_mining_algorithm=None,
-                 pre_hook=None, post_hook=None):
+                 hard_example_mining_algorithm=None):
         self.little_model = little_model
         self.big_model = BigModelClient()
         # TODO: how to handle user-defined cloud_offload_algorithm,
@@ -329,8 +329,6 @@ def __init__(self, little_model: BaseModel,
             hard_example_mining_algorithm = ThresholdFilter()
 
         self.hard_example_mining_algorithm = hard_example_mining_algorithm
-        self.pre_hook = pre_hook
-        self.post_hook = post_hook
 
         self.lc_reporter = LCReporter()
         self.lc_reporter.setDaemon(True)
@@ -339,13 +337,10 @@ def inference(self, img_data) -> InferenceResult:
         """Image inference function."""
         img_data_pre = img_data
-        if self.pre_hook:
-            img_data_pre = self.pre_hook(img_data_pre)
         edge_result = self.little_model.inference(img_data_pre)
-        if self.post_hook:
-            edge_result = self.post_hook(edge_result)
         is_hard_example = self.hard_example_mining_algorithm.hard_judge(
-            edge_result)
+            edge_result
+        )
         if not is_hard_example:
             LOG.debug("not hard example, use edge result directly")
             self.lc_reporter.update_for_edge_inference()
             return InferenceResult(False, edge_result, None, None)
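With both patches applied, preprocessing and postprocessing belong to
TSLittleModel, and the hook arguments are gone from JointInference. The
resulting worker wiring, abridged from the run() function above (not
runnable on its own: preprocess, postprocess, create_input_feed,
create_output_fetch, input_shape, and hard_example_mining_algorithm are the
objects defined in little_model.py):

    import neptune

    model = neptune.joint_inference.TSLittleModel(
        preprocess=preprocess,
        postprocess=postprocess,
        input_shape=input_shape,
        create_input_feed=create_input_feed,
        create_output_fetch=create_output_fetch
    )
    inference_instance = neptune.joint_inference.JointInference(
        little_model=model,
        hard_example_mining_algorithm=hard_example_mining_algorithm
    )
    # Each video frame yields an InferenceResult; postprocess now runs
    # inside the little model's inference instead of via post_hook.
    result = inference_instance.inference(frame)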