diff --git a/fastdeploy/vision/common/processors/limit_long.cc b/fastdeploy/vision/common/processors/limit_long.cc index 7109636cc5..0229f63bd1 100644 --- a/fastdeploy/vision/common/processors/limit_long.cc +++ b/fastdeploy/vision/common/processors/limit_long.cc @@ -38,8 +38,8 @@ bool LimitLong::ImplByOpenCV(Mat* mat) { return true; } -bool LimitLong::Run(Mat* mat, int max_long, int min_long, ProcLib lib) { - auto l = LimitLong(max_long, min_long); +bool LimitLong::Run(Mat* mat, int max_long, int min_long, int interp, ProcLib lib) { + auto l = LimitLong(max_long, min_long, interp); return l(mat, lib); } } // namespace vision diff --git a/fastdeploy/vision/common/processors/limit_long.h b/fastdeploy/vision/common/processors/limit_long.h index e059652bc0..81a92f802b 100644 --- a/fastdeploy/vision/common/processors/limit_long.h +++ b/fastdeploy/vision/common/processors/limit_long.h @@ -36,7 +36,7 @@ class LimitLong : public Processor { std::string Name() { return "LimitLong"; } static bool Run(Mat* mat, int max_long = -1, int min_long = -1, - ProcLib lib = ProcLib::OPENCV); + int interp = 1, ProcLib lib = ProcLib::OPENCV); int GetMaxLong() const { return max_long_; } private: diff --git a/fastdeploy/vision/common/processors/limit_short.cc b/fastdeploy/vision/common/processors/limit_short.cc index 348291ff0b..ad0aaaeb62 100644 --- a/fastdeploy/vision/common/processors/limit_short.cc +++ b/fastdeploy/vision/common/processors/limit_short.cc @@ -40,8 +40,8 @@ bool LimitShort::ImplByOpenCV(Mat* mat) { return true; } -bool LimitShort::Run(Mat* mat, int max_short, int min_short, ProcLib lib) { - auto l = LimitShort(max_short, min_short); +bool LimitShort::Run(Mat* mat, int max_short, int min_short, int interp, ProcLib lib) { + auto l = LimitShort(max_short, min_short, interp); return l(mat, lib); } } // namespace vision diff --git a/fastdeploy/vision/common/processors/limit_short.h b/fastdeploy/vision/common/processors/limit_short.h index 75a0b1bbe4..854385274a 100644 --- 
a/fastdeploy/vision/common/processors/limit_short.h +++ b/fastdeploy/vision/common/processors/limit_short.h @@ -36,7 +36,7 @@ class LimitShort : public Processor { std::string Name() { return "LimitShort"; } static bool Run(Mat* mat, int max_short = -1, int min_short = -1, - ProcLib lib = ProcLib::OPENCV); + int interp = 1, ProcLib lib = ProcLib::OPENCV); int GetMaxShort() const { return max_short_; } private: diff --git a/fastdeploy/vision/matting/ppmatting/ppmatting.cc b/fastdeploy/vision/matting/ppmatting/ppmatting.cc index 33ac11b58c..9c342d3157 100644 --- a/fastdeploy/vision/matting/ppmatting/ppmatting.cc +++ b/fastdeploy/vision/matting/ppmatting/ppmatting.cc @@ -70,9 +70,9 @@ bool PPMatting::BuildPreprocessPipelineFromConfig() { "dimension is %zu.", input_shape.size()); - bool is_fixed_input_shape = false; + is_fixed_input_shape_ = false; if (input_shape[2] > 0 && input_shape[3] > 0) { - is_fixed_input_shape = true; + is_fixed_input_shape_ = true; } if (input_shape[2] < 0 || input_shape[3] < 0) { FDWARNING << "Detected dynamic input shape of your model, only Paddle " @@ -88,7 +88,7 @@ bool PPMatting::BuildPreprocessPipelineFromConfig() { if (op["type"].as<std::string>() == "LimitShort") { int max_short = op["max_short"] ? op["max_short"].as<int>() : -1; int min_short = op["min_short"] ? 
op["min_short"].as<int>() : -1; - if (is_fixed_input_shape) { + if (is_fixed_input_shape_) { // if the input shape is fixed, will resize by scale, and the max // shape will not exceed input_shape long_size = max_short; @@ -100,7 +100,7 @@ bool PPMatting::BuildPreprocessPipelineFromConfig() { std::make_shared<LimitShort>(max_short, min_short)); } } else if (op["type"].as<std::string>() == "ResizeToIntMult") { - if (is_fixed_input_shape) { + if (is_fixed_input_shape_) { std::vector<int> max_size = {input_shape[2], input_shape[3]}; processors_.push_back( std::make_shared<ResizeByLong>(long_size, 1, true, max_size)); @@ -120,7 +120,7 @@ bool PPMatting::BuildPreprocessPipelineFromConfig() { processors_.push_back(std::make_shared<Normalize>(mean, std)); } else if (op["type"].as<std::string>() == "ResizeByShort") { long_size = op["short_size"].as<int>(); - if (is_fixed_input_shape) { + if (is_fixed_input_shape_) { std::vector<int> max_size = {input_shape[2], input_shape[3]}; processors_.push_back( std::make_shared<ResizeByLong>(long_size, 1, true, max_size)); @@ -169,24 +169,26 @@ bool PPMatting::Postprocess( FDERROR << "Only support post process with float32 data." 
<< std::endl; return false; } + std::vector<int64_t> dim{0, 2, 3, 1}; + Transpose(alpha_tensor, &alpha_tensor, dim); + alpha_tensor.Squeeze(0); + Mat mat = CreateFromTensor(alpha_tensor); auto iter_ipt = im_info.find("input_shape"); auto iter_out = im_info.find("output_shape"); + if (is_fixed_input_shape_){ + double scale_h = static_cast<double>(iter_out->second[0]) / + static_cast<double>(iter_ipt->second[0]); + double scale_w = static_cast<double>(iter_out->second[1]) / + static_cast<double>(iter_ipt->second[1]); + double actual_scale = std::min(scale_h, scale_w); - double scale_h = static_cast<double>(iter_out->second[0]) / - static_cast<double>(iter_ipt->second[0]); - double scale_w = static_cast<double>(iter_out->second[1]) / - static_cast<double>(iter_ipt->second[1]); - double actual_scale = std::min(scale_h, scale_w); + int size_before_pad_h = round(actual_scale * iter_ipt->second[0]); + int size_before_pad_w = round(actual_scale * iter_ipt->second[1]); - int size_before_pad_h = round(actual_scale * iter_ipt->second[0]); - int size_before_pad_w = round(actual_scale * iter_ipt->second[1]); - std::vector<int64_t> dim{0, 2, 3, 1}; - Transpose(alpha_tensor, &alpha_tensor, dim); - alpha_tensor.Squeeze(0); - Mat mat = CreateFromTensor(alpha_tensor); + Crop::Run(&mat, 0, 0, size_before_pad_w, size_before_pad_h); + } - Crop::Run(&mat, 0, 0, size_before_pad_w, size_before_pad_h); Resize::Run(&mat, iter_ipt->second[1], iter_ipt->second[0]); result->Clear(); diff --git a/fastdeploy/vision/matting/ppmatting/ppmatting.h b/fastdeploy/vision/matting/ppmatting/ppmatting.h index 864507f6b6..69f9d32185 100644 --- a/fastdeploy/vision/matting/ppmatting/ppmatting.h +++ b/fastdeploy/vision/matting/ppmatting/ppmatting.h @@ -61,6 +61,7 @@ class FASTDEPLOY_DECL PPMatting : public FastDeployModel { std::vector<std::shared_ptr<Processor>> processors_; std::string config_file_; + bool is_fixed_input_shape_; }; } // namespace matting diff --git a/tests/eval_example/test_ppmatting.py b/tests/eval_example/test_ppmatting.py index f26fd358f4..f1f1883854 100644 --- 
a/tests/eval_example/test_ppmatting.py +++ b/tests/eval_example/test_ppmatting.py @@ -36,9 +36,9 @@ def test_matting_ppmatting(): # 预测图片抠图结果 im = cv2.imread("./matting_input.jpg") result = model.predict(im.copy()) - pkl_url = "" + pkl_url = "https://bj.bcebos.com/fastdeploy/tests/ppmatting_result.pkl" if pkl_url: - fd.download("ppmatting_result.pkl", ".") + fd.download(pkl_url, ".") with open("./ppmatting_result.pkl", "rb") as f: baseline = pickle.load(f) @@ -66,9 +66,9 @@ def test_matting_ppmodnet(): im = cv2.imread("./matting_input.jpg") result = model.predict(im.copy()) - pkl_url = "" + pkl_url = "https://bj.bcebos.com/fastdeploy/tests/ppmodnet_result.pkl" if pkl_url: - fd.download("ppmodnet_result.pkl", ".") + fd.download(pkl_url, ".") with open("./ppmodnet_result.pkl", "rb") as f: baseline = pickle.load(f) @@ -96,9 +96,9 @@ def test_matting_pphumanmatting(): im = cv2.imread("./matting_input.jpg") result = model.predict(im.copy()) - pkl_url = "" + pkl_url = "https://bj.bcebos.com/fastdeploy/tests/pphumanmatting_result.pkl" if pkl_url: - fd.download("pphumanmatting_result.pkl", ".") + fd.download(pkl_url, ".") with open("./pphumanmatting_result.pkl", "rb") as f: baseline = pickle.load(f)