Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

support TensorRT inference #2921

Merged
merged 9 commits into from
Jun 1, 2021
Merged
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Next Next commit
add cpu_math_library_num_threads params
  • Loading branch information
LDOUBLEV committed May 26, 2021
commit 16d70fb74bbeded98c134df0ab69bfc4b29eff31
13 changes: 9 additions & 4 deletions tools/infer/utility.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,9 @@
from PIL import Image, ImageDraw, ImageFont
import math
from paddle import inference
import time
from ppocr.utils.logging import get_logger
logger = get_logger()


def parse_args():
Expand Down Expand Up @@ -98,6 +101,7 @@ def str2bool(v):
parser.add_argument("--cls_thresh", type=float, default=0.9)

parser.add_argument("--enable_mkldnn", type=str2bool, default=False)
parser.add_argument("--cpu_threads", type=int, default=10)
parser.add_argument("--use_pdserving", type=str2bool, default=False)

parser.add_argument("--use_mp", type=str2bool, default=False)
Expand Down Expand Up @@ -140,14 +144,15 @@ def create_predictor(args, mode, logger):
max_batch_size=args.max_batch_size)
else:
config.disable_gpu()
config.set_cpu_math_library_num_threads(6)
if hasattr(args, "cpu_threads"):
config.set_cpu_math_library_num_threads(args.cpu_threads)
else:
config.set_cpu_math_library_num_threads(
10) # default cpu threads as 10
if args.enable_mkldnn:
# cache 10 different shapes for mkldnn to avoid memory leak
config.set_mkldnn_cache_capacity(10)
config.enable_mkldnn()
# TODO LDOUBLEV: fix mkldnn bug when batch_size > 1
#config.set_mkldnn_op({'conv2d', 'depthwise_conv2d', 'pool2d', 'batch_norm'})
args.rec_batch_num = 1

# enable memory optim
config.enable_memory_optim()
Expand Down