diff --git a/Dockerfile.QA b/Dockerfile.QA index e2248ccf1e..79590a51c9 100644 --- a/Dockerfile.QA +++ b/Dockerfile.QA @@ -24,12 +24,8 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -# -# Multistage build. -# - ARG BASE_IMAGE=tritonserver -ARG BUILD_IMAGE=tritonserver_build +ARG CIBASE_IMAGE=tritonserver_cibase ARG SDK_IMAGE=tritonserver_sdk ARG TRITON_COMMON_REPO_TAG=main ARG TRITON_CORE_REPO_TAG=main @@ -38,11 +34,10 @@ ARG TRITON_BACKEND_REPO_TAG=main ARG TRITONTMP_DIR=/tmp ############################################################################ -## Build tests in the BUILD_IMAGE since it has already been configured -## correctly and has some existing build artifacts. Copy artifacts -## into QA area. +## Test artifacts built as part of the tritonserver build are +## available in CIBASE_IMAGE. Copy these artifacts into the QA area. ############################################################################ -FROM ${BUILD_IMAGE} AS build +FROM ${CIBASE_IMAGE} AS cibase ARG TRITONTMP_DIR ARG TRITON_COMMON_REPO_TAG @@ -53,6 +48,28 @@ ARG TRITON_BACKEND_REPO_TAG # Ensure apt-get won't prompt for selecting options ENV DEBIAN_FRONTEND=noninteractive +RUN apt-get update && \ + apt-get install -y --no-install-recommends \ + build-essential \ + libarchive-dev \ + libboost-dev \ + python3-dev \ + python3-pip \ + rapidjson-dev \ + software-properties-common && \ + rm -rf /var/lib/apt/lists/* + +RUN pip3 install --upgrade pip && \ + pip3 install --upgrade wheel setuptools + +RUN wget -O - https://apt.kitware.com/keys/kitware-archive-latest.asc 2>/dev/null | \ + gpg --dearmor - | \ + tee /etc/apt/trusted.gpg.d/kitware.gpg >/dev/null && \ + apt-add-repository 'deb https://apt.kitware.com/ubuntu/ focal main' && \ + apt-get update && \ + apt-get install -y --no-install-recommends \ + cmake-data=3.21.1-0kitware1ubuntu20.04.1 cmake=3.21.1-0kitware1ubuntu20.04.1 + # Add inception_graphdef model to example repo WORKDIR /workspace/docs/examples/model_repository RUN mkdir -p inception_graphdef/1 && \ @@ -64,8 +81,8 @@ RUN mkdir -p inception_graphdef/1 && \ # Update the qa/ directory with test executables, models, etc. WORKDIR /workspace RUN mkdir -p qa/common && \ - cp -r /workspace/src/test/models/repeat_int32 qa/L0_decoupled/models/ && \ - cp -r /workspace/src/test/models/square_int32 qa/L0_decoupled/models/ && \ + cp -r src/test/models/repeat_int32 qa/L0_decoupled/models/ && \ + cp -r src/test/models/square_int32 qa/L0_decoupled/models/ && \ mkdir qa/L0_simple_example/models && \ cp -r docs/examples/model_repository/simple qa/L0_simple_example/models/. && \ mkdir qa/L0_simple_go_client/models && \ @@ -81,58 +98,59 @@ RUN mkdir -p qa/common && \ mkdir qa/L0_cuda_shared_memory/models && \ cp -r docs/examples/model_repository/simple qa/L0_cuda_shared_memory/models/. 
&& \ mkdir qa/L0_client_java/models && \ - cp -r /workspace/docs/examples/model_repository/simple qa/L0_client_java/models && \ + cp -r docs/examples/model_repository/simple qa/L0_client_java/models && \ mkdir qa/L0_grpc/models && \ - cp -r /workspace/docs/examples/model_repository/simple qa/L0_grpc/models && \ - cp -r /workspace/docs/examples/model_repository/simple_dyna_sequence qa/L0_grpc/models && \ - cp -r /workspace/docs/examples/model_repository/simple_int8 qa/L0_grpc/models && \ - cp -r /workspace/docs/examples/model_repository/simple_identity qa/L0_grpc/models && \ - cp -r /workspace/docs/examples/model_repository/simple_sequence qa/L0_grpc/models && \ - cp -r /workspace/docs/examples/model_repository/simple_string qa/L0_grpc/models && \ - cp -r /workspace/docs/examples/model_repository/inception_graphdef qa/L0_grpc/models && \ + cp -r docs/examples/model_repository/simple qa/L0_grpc/models && \ + cp -r docs/examples/model_repository/simple_dyna_sequence qa/L0_grpc/models && \ + cp -r docs/examples/model_repository/simple_int8 qa/L0_grpc/models && \ + cp -r docs/examples/model_repository/simple_identity qa/L0_grpc/models && \ + cp -r docs/examples/model_repository/simple_sequence qa/L0_grpc/models && \ + cp -r docs/examples/model_repository/simple_string qa/L0_grpc/models && \ + cp -r docs/examples/model_repository/inception_graphdef qa/L0_grpc/models && \ mkdir qa/L0_http/models && \ - cp -r /workspace/docs/examples/model_repository/simple qa/L0_http/models && \ - cp -r /workspace/docs/examples/model_repository/simple_dyna_sequence qa/L0_http/models && \ - cp -r /workspace/docs/examples/model_repository/simple_identity qa/L0_http/models && \ - cp -r /workspace/docs/examples/model_repository/simple_sequence qa/L0_http/models && \ - cp -r /workspace/docs/examples/model_repository/simple_string qa/L0_http/models && \ - cp -r /workspace/docs/examples/model_repository/inception_graphdef qa/L0_http/models && \ + cp -r docs/examples/model_repository/simple qa/L0_http/models && \ + cp -r docs/examples/model_repository/simple_dyna_sequence qa/L0_http/models && \ + cp -r docs/examples/model_repository/simple_identity qa/L0_http/models && \ + cp -r docs/examples/model_repository/simple_sequence qa/L0_http/models && \ + cp -r docs/examples/model_repository/simple_string qa/L0_http/models && \ + cp -r docs/examples/model_repository/inception_graphdef qa/L0_http/models && \ mkdir qa/L0_https/models && \ cp -r docs/examples/model_repository/simple qa/L0_https/models/. && \ mkdir qa/L0_secure_grpc/models && \ cp -r docs/examples/model_repository/simple qa/L0_secure_grpc/models/. && \ - cp ${TRITONTMP_DIR}/tritonbuild/install/bin/simple qa/L0_simple_lib/. && \ - cp ${TRITONTMP_DIR}/tritonbuild/install/bin/memory_alloc qa/L0_io/. && \ - cp ${TRITONTMP_DIR}/tritonbuild/install/bin/multi_server qa/L0_multi_server/. && \ - cp ${TRITONTMP_DIR}/tritonbuild/tritonserver/install/bin/memory_test qa/L0_memory/. && \ - cp ${TRITONTMP_DIR}/tritonbuild/tritonserver/install/bin/pinned_memory_manager_test qa/L0_memory/. && \ - cp ${TRITONTMP_DIR}/tritonbuild/tritonserver/install/bin/repo_agent_test qa/L0_triton_repo_agent/. && \ - cp ${TRITONTMP_DIR}/tritonbuild/tritonserver/install/lib/libtritonrepoagent_relocation.so qa/L0_triton_repo_agent/. && \ + cp bin/simple qa/L0_simple_lib/. && \ + cp bin/memory_alloc qa/L0_io/. && \ + cp bin/multi_server qa/L0_multi_server/. && \ + cp bin/memory_test qa/L0_memory/. && \ + cp bin/pinned_memory_manager_test qa/L0_memory/. 
&& \ + cp bin/repo_agent_test qa/L0_triton_repo_agent/. && \ + cp lib/libtritonrepoagent_relocation.so qa/L0_triton_repo_agent/. && \ mkdir qa/L0_query/models/query/1 && \ - cp ${TRITONTMP_DIR}/tritonbuild/tritonserver/install/backends/query/libtriton_query.so qa/L0_query/models/query/1/. && \ - cp ${TRITONTMP_DIR}/tritonbuild/tritonserver/install/bin/query_test qa/L0_query/. && \ - cp ${TRITONTMP_DIR}/tritonbuild/tritonserver/install/bin/register_api_test qa/L0_register/. && \ - cp ${TRITONTMP_DIR}/tritonbuild/tritonserver/install/bin/async_work_queue_test qa/L0_async_work_queue/. && \ - cp ${TRITONTMP_DIR}/tritonbuild/tritonserver/install/backends/implicit_state/libtriton_implicit_state.so qa/L0_implicit_state/. && \ + cp tritonbuild/tritonserver/backends/query/libtriton_query.so qa/L0_query/models/query/1/. && \ + cp bin/query_test qa/L0_query/. && \ + cp bin/register_api_test qa/L0_register/. && \ + cp bin/async_work_queue_test qa/L0_async_work_queue/. && \ + cp tritonbuild/tritonserver/backends/implicit_state/libtriton_implicit_state.so \ + qa/L0_implicit_state/. && \ mkdir qa/L0_data_compression/models && \ - cp -r /workspace/docs/examples/model_repository/simple qa/L0_data_compression/models && \ - cp -r /workspace/deploy/mlflow-triton-plugin qa/L0_mlflow/. && \ - cp ${TRITONTMP_DIR}/tritonbuild/tritonserver/install/bin/data_compressor_test qa/L0_data_compression/. && \ - cp ${TRITONTMP_DIR}/tritonbuild/tritonserver/install/bin/metrics_api_test qa/L0_metrics/. + cp -r docs/examples/model_repository/simple qa/L0_data_compression/models && \ + cp bin/data_compressor_test qa/L0_data_compression/. && \ + cp bin/metrics_api_test qa/L0_metrics/. && \ + cp -r deploy/mlflow-triton-plugin qa/L0_mlflow/. # caffe2plan will not exist if the build was done without TensorRT enabled -RUN if [ -f ${TRITONTMP_DIR}/tritonbuild/tritonserver/install/bin/caffe2plan ]; then \ - cp ${TRITONTMP_DIR}/tritonbuild/tritonserver/install/bin/caffe2plan qa/common/.; \ +RUN if [ -f bin/caffe2plan ]; then \ + cp bin/caffe2plan qa/common/.; \ fi RUN mkdir -p qa/L0_simple_ensemble/models/simple/1 && \ - cp /workspace/docs/examples/model_repository/simple/1/model.graphdef \ + cp docs/examples/model_repository/simple/1/model.graphdef \ qa/L0_simple_ensemble/models/simple/1/. && \ mkdir -p qa/L0_simple_ensemble/models/simple/2 && \ - cp /workspace/docs/examples/model_repository/simple/1/model.graphdef \ + cp docs/examples/model_repository/simple/1/model.graphdef \ qa/L0_simple_ensemble/models/simple/2/. && \ mkdir -p qa/L0_socket/models/simple/1 && \ - cp /workspace/docs/examples/model_repository/simple/1/model.graphdef \ + cp docs/examples/model_repository/simple/1/model.graphdef \ qa/L0_socket/models/simple/1/. RUN mkdir -p qa/L0_backend_identity/models && \ @@ -140,17 +158,17 @@ RUN mkdir -p qa/L0_backend_identity/models && \ mkdir -p qa/L0_backend_identity/models/identity_fp32/1 RUN mkdir -p qa/custom_models/custom_sequence_int32/1 && \ - cp ${TRITONTMP_DIR}/tritonbuild/tritonserver/install/backends/sequence/libtriton_sequence.so \ + cp tritonbuild/tritonserver/backends/sequence/libtriton_sequence.so \ qa/custom_models/custom_sequence_int32/1/. && \ mkdir -p qa/custom_models/custom_dyna_sequence_int32/1 && \ - cp ${TRITONTMP_DIR}/tritonbuild/tritonserver/install/backends/dyna_sequence/libtriton_dyna_sequence.so \ + cp tritonbuild/tritonserver/backends/dyna_sequence/libtriton_dyna_sequence.so \ qa/custom_models/custom_dyna_sequence_int32/1/. 
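Note: the copy steps above and below assume the CI base image exposes its test artifacts at fixed locations under /workspace (bin/, lib/, and the unit-test backends under tritonbuild/tritonserver/backends/). A minimal sanity-check sketch for that layout, useful when debugging a failed QA image build (the path list is a sample, not the full set):

```python
# Hypothetical layout check for the CIBASE_IMAGE artifacts; run inside
# the image with /workspace as the working directory. Sample paths only.
import pathlib
import sys

expected = [
    'bin/simple',
    'bin/memory_alloc',
    'lib/libtritonrepoagent_relocation.so',
    'tritonbuild/tritonserver/backends/query/libtriton_query.so',
]
missing = [p for p in expected if not pathlib.Path(p).exists()]
if missing:
    sys.exit('missing CI artifacts: ' + ', '.join(missing))
```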
-# L0_lifecycle needs No-GPU build of identity backend -RUN cd ${TRITONTMP_DIR}/tritonbuild/identity && \ +# L0_lifecycle needs No-GPU build of identity backend. +RUN cd tritonbuild/identity && \ rm -rf install build && mkdir build && cd build && \ cmake -DTRITON_ENABLE_GPU=OFF \ - -DCMAKE_INSTALL_PREFIX:PATH=${TRITONTMP_DIR}/tritonbuild/identity/install \ + -DCMAKE_INSTALL_PREFIX:PATH=/workspace/tritonbuild/identity/install \ -DTRITON_COMMON_REPO_TAG:STRING=${TRITON_COMMON_REPO_TAG} \ -DTRITON_CORE_REPO_TAG:STRING=${TRITON_CORE_REPO_TAG} \ -DTRITON_THIRD_PARTY_REPO_TAG:STRING=${TRITON_THIRD_PARTY_REPO_TAG} \ @@ -158,17 +176,17 @@ RUN cd ${TRITONTMP_DIR}/tritonbuild/identity && \ make -j16 install # L0_backend_python tests require triton_shm_monitor -RUN cd ${TRITONTMP_DIR}/tritonbuild/python && \ +RUN cd tritonbuild/python && \ rm -rf install build && mkdir build && cd build && \ - cmake -DCMAKE_INSTALL_PREFIX:PATH=${TRITONTMP_DIR}/tritonbuild/python/install \ + cmake -DCMAKE_INSTALL_PREFIX:PATH=/workspace/tritonbuild/python/install \ -DTRITON_COMMON_REPO_TAG:STRING=${TRITON_COMMON_REPO_TAG} \ -DTRITON_CORE_REPO_TAG:STRING=${TRITON_CORE_REPO_TAG} \ -DTRITON_BACKEND_REPO_TAG:STRING=${TRITON_BACKEND_REPO_TAG} .. && \ - make -j18 triton-shm-monitor install + make -j16 triton-shm-monitor install -RUN cp ${TRITONTMP_DIR}/tritonbuild/identity/install/backends/identity/libtriton_identity.so \ +RUN cp tritonbuild/identity/install/backends/identity/libtriton_identity.so \ qa/L0_lifecycle/. && \ - cp ${TRITONTMP_DIR}/tritonbuild/python/install/backends/python/triton_shm_monitor*.so \ + cp tritonbuild/python/install/backends/python/triton_shm_monitor*.so \ qa/common/. && \ mkdir -p qa/L0_perf_nomodel/custom_models/custom_zero_1_float32/1 && \ mkdir -p qa/L0_perf_pyclients/custom_models/custom_zero_1_int32/1 && \ @@ -213,14 +231,13 @@ RUN if [ -d qa/L0_model_control_stress ]; then \ cp -r qa/L0_model_control_stress/. qa/L0_model_control_stress_valgrind_massif; \ fi -RUN cp ${TRITONTMP_DIR}/tritonbuild/install/backends/repeat/libtriton_repeat.so \ - qa/L0_model_config/. +RUN cp backends/repeat/libtriton_repeat.so qa/L0_model_config/. RUN mkdir -p qa/L0_decoupled/models/repeat_int32/1 && \ - cp ${TRITONTMP_DIR}/tritonbuild/install/backends/repeat/libtriton_repeat.so \ + cp backends/repeat/libtriton_repeat.so \ qa/L0_decoupled/models/repeat_int32/1/. RUN mkdir -p qa/L0_decoupled/models/square_int32/1 && \ - cp ${TRITONTMP_DIR}/tritonbuild/install/backends/square/libtriton_square.so \ + cp backends/square/libtriton_square.so \ qa/L0_decoupled/models/square_int32/1/. RUN mkdir -p qa/L0_decoupled/models/identity_int32/1 RUN mkdir -p qa/L0_decoupled/models/simple_repeat/1 && \ @@ -229,10 +246,10 @@ RUN mkdir -p qa/L0_decoupled/models/simple_repeat/1 && \ mkdir -p qa/L0_decoupled/models/repeat_square/1 && \ mkdir -p qa/L0_decoupled/models/nested_square/1 RUN mkdir -p qa/L0_repoagent_checksum/models/identity_int32/1 && \ - cp ${TRITONTMP_DIR}/tritonbuild/install/backends/identity/libtriton_identity.so \ + cp tritonbuild/identity/install/backends/identity/libtriton_identity.so \ qa/L0_repoagent_checksum/models/identity_int32/1/. RUN mkdir -p qa/L0_passive_instance/models/distributed_int32_int32_int32/1 && \ - cp ${TRITONTMP_DIR}/tritonbuild/tritonserver/install/backends/distributed_addsub/libtriton_distributed_addsub.so \ + cp tritonbuild/tritonserver/backends/distributed_addsub/libtriton_distributed_addsub.so \ qa/L0_passive_instance/models/distributed_int32_int32_int32/1/.
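Several steps above share one pattern: a backend shared library is copied directly into version directory 1 of a test model, so each QA model repository is self-contained. A hedged Python equivalent of the repeat_int32 step (source and destination paths taken from the Dockerfile; /workspace working directory assumed):

```python
# Mirrors the 'repeat_int32' setup above: place the backend shared
# library inside the model's version directory.
import os
import shutil

version_dir = 'qa/L0_decoupled/models/repeat_int32/1'
os.makedirs(version_dir, exist_ok=True)
shutil.copy('backends/repeat/libtriton_repeat.so', version_dir)
```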
############################################################################ @@ -242,7 +259,7 @@ FROM ${SDK_IMAGE} AS sdk ARG TARGETPLATFORM WORKDIR /workspace -COPY --from=build /workspace/qa/ qa/ +COPY --from=cibase /workspace/qa/ qa/ RUN mkdir -p qa/clients && mkdir -p qa/pkgs && \ cp -a install/bin/* qa/clients/. && \ cp install/lib/libgrpcclient.so qa/clients/. && \ diff --git a/Dockerfile.win10.min b/Dockerfile.win10.min index 1e77927c75..2a35172301 100644 --- a/Dockerfile.win10.min +++ b/Dockerfile.win10.min @@ -135,6 +135,4 @@ RUN setx PATH "%PATH%;c:\TensorRT\lib;%CUDA_INSTALL_ROOT_WP%\bin" # that cmake can find the packages installed by vcpkg. ENV CMAKE_TOOLCHAIN_FILE /vcpkg/scripts/buildsystems/vcpkg.cmake ENV VCPKG_TARGET_TRIPLET x64-windows -ENV TRITONBUILD_CMAKE_TOOLCHAIN_FILE /vcpkg/scripts/buildsystems/vcpkg.cmake -ENV TRITONBUILD_VCPKG_TARGET_TRIPLET x64-windows ENTRYPOINT C:\BuildTools\VC\Auxiliary\Build\vcvars64.bat && diff --git a/build.py b/build.py index afe2c9ce73..d008a37b44 100755 --- a/build.py +++ b/build.py @@ -27,27 +27,30 @@ import argparse import logging +import os import os.path import multiprocessing import pathlib import platform import shutil +import stat import subprocess import sys import traceback -from distutils.dir_util import copy_tree from inspect import getsourcefile # # Build Triton Inference Server. # -# By default build.py builds the Triton container. The TRITON_VERSION -# file indicates the Triton version and TRITON_VERSION_MAP is used to -# determine the corresponding container version and upstream container -# version (upstream containers are dependencies required by -# Triton). These versions may be overridden. See docs/build.md for -# more information. +# By default build.py builds the Triton Docker image, but can also be +# used to build without Docker. See docs/build.md and --help for more +# information. +# +# The TRITON_VERSION file indicates the Triton version and +# TRITON_VERSION_MAP is used to determine the corresponding container +# version and upstream container version (upstream containers are +# dependencies required by Triton). These versions may be overridden. # Map from Triton version to corresponding container and component versions.
# @@ -96,20 +99,15 @@ '2.2.9') # DCGM version } -EXAMPLE_BACKENDS = ['identity', 'square', 'repeat'] CORE_BACKENDS = ['ensemble'] -NONCORE_BACKENDS = [ - 'tensorflow1', 'tensorflow2', 'onnxruntime', 'python', 'dali', 'pytorch', - 'openvino', 'fil', 'fastertransformer', 'tensorrt', 'armnn_tflite' -] -EXAMPLE_REPOAGENTS = ['checksum'] + FLAGS = None EXTRA_CORE_CMAKE_FLAGS = {} OVERRIDE_CORE_CMAKE_FLAGS = {} EXTRA_BACKEND_CMAKE_FLAGS = {} OVERRIDE_BACKEND_CMAKE_FLAGS = {} -SCRIPT_DIR = os.path.dirname(os.path.abspath(getsourcefile(lambda: 0))) +THIS_SCRIPT_DIR = os.path.dirname(os.path.abspath(getsourcefile(lambda: 0))) def log(msg, force=False): @@ -129,6 +127,12 @@ def fail(msg): fail_if(True, msg) +def fail_if(p, msg): + if p: + print('error: {}'.format(msg), file=sys.stderr) + sys.exit(1) + + def target_platform(): if FLAGS.target_platform is not None: return FLAGS.target_platform @@ -141,126 +145,219 @@ def target_machine(): return platform.machine().lower() -def fail_if(p, msg): - if p: - print('error: {}'.format(msg), file=sys.stderr) - sys.exit(1) - - -def mkdir(path): - log_verbose('mkdir: {}'.format(path)) - pathlib.Path(path).mkdir(parents=True, exist_ok=True) +def tagged_backend(be, version): + tagged_be = be + if be == 'openvino': + if version[0] == 'SPECIFIC': + tagged_be += "_" + version[1] + else: + tagged_be += "_" + version[0].replace('.', '_') + if version[1] and target_platform() != 'windows': + tagged_be += "_pre" + return tagged_be -def rmdir(path): - log_verbose('rmdir: {}'.format(path)) - shutil.rmtree(path, ignore_errors=True) +def container_versions(version, container_version, upstream_container_version): + if container_version is None: + if version not in TRITON_VERSION_MAP: + fail('container version not known for {}'.format(version)) + container_version = TRITON_VERSION_MAP[version][0] + if upstream_container_version is None: + if version not in TRITON_VERSION_MAP: + fail('upstream container version not known for {}'.format(version)) + upstream_container_version = TRITON_VERSION_MAP[version][1] + return container_version, upstream_container_version -def cpdir(src, dest): - log_verbose('cpdir: {} -> {}'.format(src, dest)) - copy_tree(src, dest, preserve_symlinks=1) +class BuildScript: + """Utility class for writing build scripts""" + def __init__(self, filepath, desc=None, verbose=False): + self._filepath = filepath + self._file = open(self._filepath, "w") + self._verbose = verbose + self.header(desc) -def untar(targetdir, tarfile): - log_verbose('untar {} into {}'.format(tarfile, targetdir)) - p = subprocess.Popen(['tar', '--strip-components=1', '-xf', tarfile], - cwd=targetdir) - p.wait() - fail_if(p.returncode != 0, - 'untar {} into {} failed'.format(tarfile, targetdir)) + def __enter__(self): + return self + def __exit__(self, type, value, traceback): + self.close() -def gitclone(cwd, repo, tag, subdir, org): - # If 'tag' starts with "pull/" then it must be of form - # "pull//head". We just clone at "main" and then fetch the - # reference onto a new branch we name "tritonbuildref". 
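For reference, tagged_backend() above only decorates the 'openvino' backend name; every other backend passes through unchanged. A few worked examples (the version tuples are hypothetical, and evaluating them assumes FLAGS has been parsed so target_platform() works; a non-Windows target is assumed for the last case):

```python
# Hypothetical inputs illustrating tagged_backend() as defined above.
assert tagged_backend('onnxruntime', ('1.10.0', None)) == 'onnxruntime'
assert tagged_backend('openvino', ('SPECIFIC', '2021.2')) == 'openvino_2021.2'
assert tagged_backend('openvino', ('2021.4', None)) == 'openvino_2021_4'
# A truthy second element marks a prerelease and appends '_pre' (non-Windows).
assert tagged_backend('openvino', ('2021.4', True)) == 'openvino_2021_4_pre'
```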
- clone_dir = cwd + '/' + subdir - if tag.startswith("pull/"): - log_verbose('git clone of repo "{}" at ref "{}"'.format(repo, tag)) + def __del__(self): + self.close() - if os.path.exists(clone_dir) and not FLAGS.no_force_clone: - rmdir(clone_dir) + def close(self): + """Close the file""" + if self._file is not None: + if target_platform() == 'windows': + self.blankln() + self._file.write('}\n') + self._file.write('catch {\n') + self._file.write(' $_;\n') + self._file.write(' ExitWithCode 1;\n') + self._file.write('}\n') + self._file.close() + self._file = None + st = os.stat(self._filepath) + os.chmod(self._filepath, st.st_mode | stat.S_IEXEC) + + def blankln(self): + self._file.write('\n') + + def commentln(self, cnt): + self._file.write('#' * cnt + '\n') + + def comment(self, msg=''): + if not isinstance(msg, str): + try: + for m in msg: + self._file.write(f'# {m}\n') + return + except TypeError: + pass + self._file.write(f'# {msg}\n') + + def comment_verbose(self, msg=''): + if self._verbose: + self.comment(msg) + + def header(self, desc=None): + if target_platform() != 'windows': + self._file.write('#!/usr/bin/env bash\n\n') - if not os.path.exists(clone_dir): - p = subprocess.Popen([ - 'git', 'clone', '--recursive', '--depth=1', '{}/{}.git'.format( - org, repo), subdir - ], - cwd=cwd) - p.wait() - fail_if( - p.returncode != 0, - 'git clone of repo "{}" at branch "main" failed'.format(repo)) + if desc is not None: + self.comment() + self.comment(desc) + self.comment() + self.blankln() - log_verbose('git fetch of ref "{}"'.format(tag)) - p = subprocess.Popen( - ['git', 'fetch', 'origin', '{}:tritonbuildref'.format(tag)], - cwd=os.path.join(cwd, subdir)) - p.wait() - fail_if(p.returncode != 0, - 'git fetch of ref "{}" failed'.format(tag)) - - log_verbose('git checkout of tritonbuildref') - p = subprocess.Popen(['git', 'checkout', 'tritonbuildref'], - cwd=os.path.join(cwd, subdir)) - p.wait() - fail_if(p.returncode != 0, - 'git checkout of branch "tritonbuildref" failed') + self.comment('Exit script immediately if any command fails') + if target_platform() == 'windows': + self._file.write('function ExitWithCode($exitcode) {\n') + self._file.write(' $host.SetShouldExit($exitcode)\n') + self._file.write(' exit $exitcode\n') + self._file.write('}\n') + self.blankln() + if self._verbose: + self._file.write('Set-PSDebug -Trace 1\n') + self.blankln() + self._file.write('try {\n') + else: + self._file.write('set -e\n') + if self._verbose: + self._file.write('set -x\n') + self.blankln() - else: - log_verbose('git clone of repo "{}" at tag "{}"'.format(repo, tag)) + def envvar_ref(self, v): + if target_platform() == 'windows': + return f'${{env:{v}}}' + return f'${{{v}}}' - if os.path.exists(clone_dir) and not FLAGS.no_force_clone: - rmdir(clone_dir) + def cmd(self, clist, check_exitcode=False): + if isinstance(clist, str): + self._file.write(f'{clist}\n') + else: + for c in clist: + self._file.write(f'{c} ') + self.blankln() - if not os.path.exists(clone_dir): - p = subprocess.Popen([ - 'git', 'clone', '--recursive', '--single-branch', '--depth=1', - '-b', tag, '{}/{}.git'.format(org, repo), subdir - ], - cwd=cwd) - p.wait() - fail_if( - p.returncode != 0, - 'git clone of repo "{}" at tag "{}" failed'.format(repo, tag)) + if check_exitcode: + if target_platform() == 'windows': + self._file.write('if ($LASTEXITCODE -ne 0) {\n') + self._file.write( + ' Write-Output "exited with status code $LASTEXITCODE";\n') + self._file.write(' ExitWithCode 1;\n') + self._file.write('}\n') def cwd(self,
path): + if target_platform() == 'windows': + self.cmd(f'Set-Location -EV Err -EA Stop {path}') + else: + self.cmd(f'cd {path}') -def prebuild_command(): - p = subprocess.Popen(FLAGS.container_prebuild_command.split()) - p.wait() - fail_if(p.returncode != 0, 'container prebuild cmd failed') + def cp(self, src, dest): + if target_platform() == 'windows': + self.cmd(f'Copy-Item -EV Err -EA Stop {src} -Destination {dest}') + else: + self.cmd(f'cp {src} {dest}') + def mkdir(self, path): + if target_platform() == 'windows': + self.cmd( + f'New-Item -EV Err -EA Stop -ItemType Directory -Force -Path {path}' + ) + else: + self.cmd(f'mkdir -p {pathlib.Path(path)}') -def cmake(cwd, args): - log_verbose('cmake {}'.format(args)) - p = subprocess.Popen([ - 'cmake', - ] + args, cwd=cwd) - p.wait() - fail_if(p.returncode != 0, 'cmake failed') + def rmdir(self, path): + if target_platform() == 'windows': + self.cmd(f'if (Test-Path -Path {path}) {{') + self.cmd(f' Remove-Item -EV Err -EA Stop -Recurse -Force {path}') + self.cmd('}') + else: + self.cmd(f'rm -fr {pathlib.Path(path)}') + def cpdir(self, src, dest): + if target_platform() == 'windows': + self.cmd( + f'Copy-Item -EV Err -EA Stop -Recurse {src} -Destination {dest}' + ) + else: + self.cmd(f'cp -r {src} {dest}') -def makeinstall(cwd, target='install'): - log_verbose('make {}'.format(target)) + def tar(self, subdir, tar_filename): + if target_platform() == 'windows': + fail('unsupported operation: tar') + else: + self.cmd(f'tar zcf {tar_filename} {subdir}') + + def cmake(self, args): + # Pass some additional envvars into cmake... + env_args = [] + for k in ('TRT_VERSION', 'DALI_VERSION', 'CMAKE_TOOLCHAIN_FILE', + 'VCPKG_TARGET_TRIPLET'): + env_args += [f'"-D{k}={self.envvar_ref(k)}"'] + self.cmd(f'cmake {" ".join(env_args)} {" ".join(args)}', + check_exitcode=True) + + def makeinstall(self, target='install'): + if target_platform() == 'windows': + verbose_flag = '' if self._verbose else '-clp:ErrorsOnly' + self.cmd( + f'msbuild.exe -m:{FLAGS.build_parallel} {verbose_flag} -p:Configuration={FLAGS.build_type} {target}.vcxproj', + check_exitcode=True) + else: + verbose_flag = 'VERBOSE=1' if self._verbose else 'VERBOSE=0' + self.cmd(f'make -j{FLAGS.build_parallel} {verbose_flag} {target}') - if target_platform() == 'windows': - verbose_flag = '' if FLAGS.verbose else '-clp:ErrorsOnly' - buildtype_flag = '-p:Configuration={}'.format(FLAGS.build_type) - p = subprocess.Popen([ - 'msbuild.exe', '-m:{}'.format(str(FLAGS.build_parallel)), - verbose_flag, buildtype_flag, '{}.vcxproj'.format(target) - ], - cwd=cwd) - else: - verbose_flag = 'VERBOSE=1' if FLAGS.verbose else 'VERBOSE=0' - p = subprocess.Popen( - ['make', '-j', - str(FLAGS.build_parallel), verbose_flag, target], - cwd=cwd) + def gitclone(self, repo, tag, subdir, org): + clone_dir = subdir + if not FLAGS.no_force_clone: + self.rmdir(clone_dir) - p.wait() - fail_if(p.returncode != 0, 'make {} failed'.format(target)) + if target_platform() == 'windows': + self.cmd(f'if (-Not (Test-Path -Path {clone_dir})) {{') + else: + self.cmd(f'if [[ ! -e {clone_dir} ]]; then') + + # If 'tag' starts with "pull/" then it must be of form + # "pull/<pr>/head". We just clone at "main" and then fetch the + # reference onto a new branch we name "tritonbuildref".
+ if tag.startswith("pull/"): + self.cmd( + f' git clone --recursive --depth=1 {org}/{repo}.git {subdir};', + check_exitcode=True) + self.cmd('}' if target_platform() == 'windows' else 'fi') + self.cwd(subdir) + self.cmd(f'git fetch origin {tag}:tritonbuildref', + check_exitcode=True) + self.cmd(f'git checkout tritonbuildref', check_exitcode=True) + else: + self.cmd( + f' git clone --recursive --single-branch --depth=1 -b {tag} {org}/{repo}.git {subdir};', + check_exitcode=True) + self.cmd('}' if target_platform() == 'windows' else 'fi') def cmake_core_arg(name, type, value): @@ -272,7 +369,7 @@ def cmake_core_arg(name, type, value): type = '' else: type = ':{}'.format(type) - return '-D{}{}={}'.format(name, type, value) + return '"-D{}{}={}"'.format(name, type, value) def cmake_core_enable(name, flag): @@ -283,13 +380,13 @@ def cmake_core_enable(name, flag): value = OVERRIDE_CORE_CMAKE_FLAGS[name] else: value = 'ON' if flag else 'OFF' - return '-D{}:BOOL={}'.format(name, value) + return '"-D{}:BOOL={}"'.format(name, value) def cmake_core_extra_args(): args = [] for k, v in EXTRA_CORE_CMAKE_FLAGS.items(): - args.append('-D{}={}'.format(k, v)) + args.append('"-D{}={}"'.format(k, v)) return args @@ -303,7 +400,7 @@ def cmake_backend_arg(backend, name, type, value): type = '' else: type = ':{}'.format(type) - return '-D{}{}={}'.format(name, type, value) + return '"-D{}{}={}"'.format(name, type, value) def cmake_backend_enable(backend, name, flag): @@ -316,14 +413,14 @@ def cmake_backend_enable(backend, name, flag): value = OVERRIDE_BACKEND_CMAKE_FLAGS[backend][name] if value is None: value = 'ON' if flag else 'OFF' - return '-D{}:BOOL={}'.format(name, value) + return '"-D{}:BOOL={}"'.format(name, value) def cmake_backend_extra_args(backend): args = [] if backend in EXTRA_BACKEND_CMAKE_FLAGS: for k, v in EXTRA_BACKEND_CMAKE_FLAGS[backend].items(): - args.append('-D{}={}'.format(k, v)) + args.append('"-D{}={}"'.format(k, v)) return args @@ -333,13 +430,13 @@ def cmake_repoagent_arg(name, type, value): type = '' else: type = ':{}'.format(type) - return '-D{}{}={}'.format(name, type, value) + return '"-D{}{}={}"'.format(name, type, value) def cmake_repoagent_enable(name, flag): # For now there is no override for repo-agents value = 'ON' if flag else 'OFF' - return '-D{}:BOOL={}'.format(name, value) + return '"-D{}:BOOL={}"'.format(name, value) def cmake_repoagent_extra_args(): @@ -348,7 +445,7 @@ def cmake_repoagent_extra_args(): return args -def core_cmake_args(components, backends, install_dir): +def core_cmake_args(components, backends, cmake_dir, install_dir): cargs = [ cmake_core_arg('CMAKE_BUILD_TYPE', None, FLAGS.build_type), cmake_core_arg('CMAKE_INSTALL_PREFIX', 'PATH', install_dir), @@ -406,14 +503,8 @@ def core_cmake_args(components, backends, install_dir): cargs.append( cmake_core_enable('TRITON_ENABLE_TENSORRT', 'tensorrt' in backends)) - # If TRITONBUILD_* is defined in the env then we use it to set - # corresponding cmake value. 
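Two things are worth noting at this point. First, the cmake_*_arg helpers now wrap every -D argument in double quotes: the arguments are no longer handed to subprocess as a list but written into a generated shell or PowerShell script, where unquoted values could be word-split or expanded. Second, BuildScript is only ever a script writer; nothing is executed while it runs. A minimal, hypothetical use of the class (assumes FLAGS has been parsed so target_platform() and FLAGS.build_parallel are available):

```python
# Illustrative only: emit an executable script that would configure and
# build a CMake project. Running build.py writes scripts like this one;
# executing the generated script performs the actual build.
with BuildScript('/tmp/demo_build', verbose=True,
                 desc='demo build script') as script:
    script.mkdir('/tmp/demo/build')
    script.cwd('/tmp/demo/build')
    # cmake() also forwards TRT_VERSION, DALI_VERSION, CMAKE_TOOLCHAIN_FILE
    # and VCPKG_TARGET_TRIPLET from the environment of the generated script.
    script.cmake(['"-DCMAKE_BUILD_TYPE=Release"', '..'])
    script.makeinstall()
# close() marks /tmp/demo_build executable (chmod +x equivalent).
```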
- for evar, eval in os.environ.items(): - if evar.startswith('TRITONBUILD_'): - cargs.append(cmake_core_arg(evar[len('TRITONBUILD_'):], None, eval)) - cargs += cmake_core_extra_args() - cargs.append(FLAGS.cmake_dir) + cargs.append(cmake_dir) return cargs @@ -422,10 +513,7 @@ def repoagent_repo(ra): def repoagent_cmake_args(images, components, ra, install_dir): - if ra in EXAMPLE_REPOAGENTS: - args = [] - else: - fail('unknown agent {}'.format(ra)) + args = [] cargs = args + [ cmake_repoagent_arg('CMAKE_BUILD_TYPE', None, FLAGS.build_type), @@ -437,14 +525,6 @@ def repoagent_cmake_args(images, components, ra, install_dir): ] cargs.append(cmake_repoagent_enable('TRITON_ENABLE_GPU', FLAGS.enable_gpu)) - - # If TRITONBUILD_* is defined in the env then we use it to set - # corresponding cmake value. - for evar, eval in os.environ.items(): - if evar.startswith('TRITONBUILD_'): - cargs.append( - cmake_repoagent_arg(evar[len('TRITONBUILD_'):], None, eval)) - cargs += cmake_repoagent_extra_args() cargs.append('..') return cargs @@ -482,10 +562,8 @@ def backend_cmake_args(images, components, be, install_dir, library_paths, args = [] elif be == 'tensorrt': args = tensorrt_cmake_args() - elif be in EXAMPLE_BACKENDS: - args = [] else: - fail('unknown backend {}'.format(be)) + args = [] cargs = args + [ cmake_backend_arg(be, 'CMAKE_BUILD_TYPE', None, FLAGS.build_type), @@ -506,13 +584,6 @@ def backend_cmake_args(images, components, be, install_dir, library_paths, cargs.append( cmake_backend_enable(be, 'TRITON_ENABLE_STATS', FLAGS.enable_stats)) - # If TRITONBUILD_* is defined in the env then we use it to set - # corresponding cmake value. - for evar, eval in os.environ.items(): - if evar.startswith('TRITONBUILD_'): - cargs.append( - cmake_backend_arg(be, evar[len('TRITONBUILD_'):], None, eval)) - cargs += cmake_backend_extra_args(be) cargs.append('..') return cargs @@ -764,19 +835,6 @@ def install_dcgm_libraries(dcgm_version, target_machine): '''.format(dcgm_version, dcgm_version) -def get_container_versions(version, container_version, - upstream_container_version): - if container_version is None: - if version not in TRITON_VERSION_MAP: - fail('container version not known for {}'.format(version)) - container_version = TRITON_VERSION_MAP[version][0] - if upstream_container_version is None: - if version not in TRITON_VERSION_MAP: - fail('upstream container version not known for {}'.format(version)) - upstream_container_version = TRITON_VERSION_MAP[version][1] - return container_version, upstream_container_version - - def create_dockerfile_buildbase(ddir, dockerfile_name, argmap): df = ''' ARG TRITON_VERSION={} @@ -849,6 +907,15 @@ def create_dockerfile_buildbase(ddir, dockerfile_name, argmap): cmake-data=3.21.1-0kitware1ubuntu20.04.1 cmake=3.21.1-0kitware1ubuntu20.04.1 ''' + if FLAGS.enable_gpu: + df += install_dcgm_libraries(argmap['DCGM_VERSION'], + target_machine()) + + df += ''' +ENV TRITON_SERVER_VERSION ${TRITON_VERSION} +ENV NVIDIA_TRITON_SERVER_VERSION ${TRITON_CONTAINER_VERSION} +''' + # Copy in the triton source. We remove existing contents first in # case the FROM container has something there already. if target_platform() == 'windows': @@ -863,71 +930,44 @@ def create_dockerfile_buildbase(ddir, dockerfile_name, argmap): RUN rm -fr * COPY . . 
ENTRYPOINT [] -''' - if FLAGS.enable_gpu: - df += install_dcgm_libraries(argmap['DCGM_VERSION'], - target_machine()) - - df += ''' -ENV TRITON_SERVER_VERSION ${TRITON_VERSION} -ENV NVIDIA_TRITON_SERVER_VERSION ${TRITON_CONTAINER_VERSION} ''' - mkdir(ddir) with open(os.path.join(ddir, dockerfile_name), "w") as dfile: dfile.write(df) -def create_dockerfile_build(ddir, dockerfile_name, backends, build_dir): +def create_dockerfile_cibase(ddir, dockerfile_name, argmap): df = ''' -FROM tritonserver_builder_image AS build -FROM tritonserver_buildbase -COPY --from=build {0} {0} -'''.format(build_dir) +ARG TRITON_VERSION={} +ARG TRITON_CONTAINER_VERSION={} +ARG BASE_IMAGE={} +'''.format(argmap['TRITON_VERSION'], argmap['TRITON_CONTAINER_VERSION'], + argmap['BASE_IMAGE']) - # If requested, package the source code for all OSS used to build - # Triton Windows is not delivered as a container (and tar not - # available) so skip for windows platform. - if target_platform() != 'windows': - if not FLAGS.no_core_build and not FLAGS.no_container_source: - df += ''' -RUN mkdir -p {0}/install/third-party-src && \ - (cd {0}/tritonserver/build && \ - tar zcf {0}/install/third-party-src/src.tar.gz third-party-src) -COPY --from=build /workspace/docker/README.third-party-src {0}/install/third-party-src/README -'''.format(build_dir) + df += ''' +FROM ${BASE_IMAGE} + +ARG TRITON_VERSION +ARG TRITON_CONTAINER_VERSION + +COPY build/ci /workspace + +WORKDIR /workspace + +ENV TRITON_SERVER_VERSION ${TRITON_VERSION} +ENV NVIDIA_TRITON_SERVER_VERSION ${TRITON_CONTAINER_VERSION} +''' - if 'onnxruntime' in backends: - if target_platform() != 'windows': - df += ''' -# Copy ONNX custom op library and model (needed for testing) -RUN if [ -d {0}/onnxruntime ]; then \ - cp {0}/onnxruntime/install/test/libcustom_op_library.so /workspace/qa/L0_custom_ops/.; \ - cp {0}/onnxruntime/install/test/custom_op_test.onnx /workspace/qa/L0_custom_ops/.; \ - fi -'''.format(build_dir) - - mkdir(ddir) with open(os.path.join(ddir, dockerfile_name), "w") as dfile: dfile.write(df) def create_dockerfile_linux(ddir, dockerfile_name, argmap, backends, repoagents, - endpoints, build_dir): + endpoints): df = ''' -# -# Multistage build. -# ARG TRITON_VERSION={} ARG TRITON_CONTAINER_VERSION={} - ARG BASE_IMAGE={} -ARG BUILD_IMAGE=tritonserver_build - -############################################################################ -## Build image -############################################################################ -FROM ${{BUILD_IMAGE}} AS tritonserver_build '''.format(argmap['TRITON_VERSION'], argmap['TRITON_CONTAINER_VERSION'], argmap['BASE_IMAGE']) @@ -956,49 +996,22 @@ def create_dockerfile_linux(ddir, dockerfile_name, argmap, backends, repoagents, target_machine()) df += ''' +WORKDIR /opt +COPY --chown=1000:1000 build/install tritonserver +RUN chmod -R go-w tritonserver + WORKDIR /opt/tritonserver -COPY --chown=1000:1000 LICENSE . -COPY --chown=1000:1000 TRITON_VERSION . COPY --chown=1000:1000 NVIDIA_Deep_Learning_Container_License.pdf . 
-''' +''' if not FLAGS.no_core_build: - df += ''' -COPY --chown=1000:1000 --from=tritonserver_build {0}/install/bin/tritonserver bin/ -COPY --chown=1000:1000 --from=tritonserver_build {0}/install/lib/libtritonserver.so lib/ -COPY --chown=1000:1000 --from=tritonserver_build {0}/install/include/triton/core include/triton/core - -# Top-level include/core not copied so --chown does not set it correctly, -# so explicit set on all of include -RUN chown -R triton-server:triton-server include -'''.format(build_dir) - - # If requested, include the source code for all OSS used to build Triton - if not FLAGS.no_container_source: - df += ''' -COPY --chown=1000:1000 --from=tritonserver_build {0}/install/third-party-src third-party-src -'''.format(build_dir) - # Add feature labels for SageMaker endpoint if 'sagemaker' in endpoints: df += ''' LABEL com.amazonaws.sagemaker.capabilities.accept-bind-to-port=true -COPY --chown=1000:1000 --from=tritonserver_build /workspace/docker/sagemaker/serve /usr/bin/. +COPY --chown=1000:1000 docker/sagemaker/serve /usr/bin/. ''' - for noncore in NONCORE_BACKENDS: - if noncore in backends: - df += ''' -COPY --chown=1000:1000 --from=tritonserver_build {0}/install/backends backends -'''.format(build_dir) - break - - if len(repoagents) > 0: - df += ''' -COPY --chown=1000:1000 --from=tritonserver_build {0}/install/repoagents repoagents -'''.format(build_dir) - - mkdir(ddir) with open(os.path.join(ddir, dockerfile_name), "w") as dfile: dfile.write(df) @@ -1154,21 +1167,11 @@ def dockerfile_prepare_container_linux(argmap, backends, enable_gpu, def create_dockerfile_windows(ddir, dockerfile_name, argmap, backends, - repoagents, build_dir): + repoagents): df = ''' -# -# Multistage build. -# ARG TRITON_VERSION={} ARG TRITON_CONTAINER_VERSION={} - ARG BASE_IMAGE={} -ARG BUILD_IMAGE=tritonserver_build - -############################################################################ -## Build image -############################################################################ -FROM ${{BUILD_IMAGE}} AS tritonserver_build ############################################################################ ## Production stage: Create container with just inference server executable @@ -1183,26 +1186,18 @@ def create_dockerfile_windows(ddir, dockerfile_name, argmap, backends, LABEL com.nvidia.tritonserver.version="${{TRITON_SERVER_VERSION}}" RUN setx path "%path%;C:\opt\tritonserver\bin" + '''.format(argmap['TRITON_VERSION'], argmap['TRITON_CONTAINER_VERSION'], argmap['BASE_IMAGE']) df += ''' +WORKDIR /opt +RUN rmdir /S/Q tritonserver || exit 0 +COPY --chown=1000:1000 build/install tritonserver + WORKDIR /opt/tritonserver -RUN rmdir /S/Q * || exit 0 -COPY LICENSE . -COPY TRITON_VERSION . -COPY NVIDIA_Deep_Learning_Container_License.pdf . -COPY --from=tritonserver_build {0}/install/bin bin -COPY --from=tritonserver_build {0}/install/lib/tritonserver.lib lib/ -COPY --from=tritonserver_build {0}/install/include/triton/core include/triton/core -'''.format(build_dir) - - for noncore in NONCORE_BACKENDS: - if noncore in backends: - df += ''' -COPY --from=tritonserver_build {0}/install/backends backends -'''.format(build_dir) - break +COPY --chown=1000:1000 NVIDIA_Deep_Learning_Container_License.pdf . 
+''' df += ''' ENTRYPOINT [] ENV NVIDIA_BUILD_ID {} @@ -1211,27 +1206,12 @@ def create_dockerfile_windows(ddir, dockerfile_name, argmap, backends, '''.format(argmap['NVIDIA_BUILD_ID'], argmap['NVIDIA_BUILD_ID'], argmap['NVIDIA_BUILD_REF']) - mkdir(ddir) with open(os.path.join(ddir, dockerfile_name), "w") as dfile: dfile.write(df) -def container_build(images, backends, repoagents, endpoints): - # The cmake, build and install directories within the container. - # Windows uses "\" for the path separator but Docker expects "/" - # (unix style) separator. We use replace to fix the path for docker usage. - build_dir = os.path.join(FLAGS.tmp_dir, 'tritonbuild').replace("\\", "/") - install_dir = os.path.join(build_dir, 'install') - if target_platform() == 'windows': - install_dir = os.path.normpath(install_dir) - cmake_dir = os.path.normpath('c:/workspace') - else: - cmake_dir = '/workspace' - - # We can't use docker module for building container because it - # doesn't stream output and it also seems to handle cache-from - # incorrectly which leads to excessive rebuilds in the multistage - # build. +def create_build_dockerfiles(container_build_dir, images, backends, repoagents, + endpoints): if 'base' in images: base_image = images['base'] elif target_platform() == 'windows': @@ -1270,219 +1250,220 @@ def container_build(images, backends, repoagents, endpoints): FLAGS.upstream_container_version) dockerfileargmap['GPU_BASE_IMAGE'] = gpu_base_image - cachefrommap = [ - 'tritonserver_buildbase', 'tritonserver_buildbase_cache0', - 'tritonserver_buildbase_cache1' - ] - - cachefromargs = ['--cache-from={}'.format(k) for k in cachefrommap] - commonargs = [ - 'docker', 'build', '-f', - os.path.join(FLAGS.build_dir, 'Dockerfile.buildbase') - ] - if not FLAGS.no_container_pull: - commonargs += [ - '--pull', - ] + create_dockerfile_buildbase(FLAGS.build_dir, 'Dockerfile.buildbase', + dockerfileargmap) - # Windows docker runs in a VM and memory needs to be specified - # explicitly. if target_platform() == 'windows': - commonargs += ['--memory', FLAGS.container_memory] + create_dockerfile_windows(FLAGS.build_dir, 'Dockerfile', + dockerfileargmap, backends, repoagents) + else: + create_dockerfile_linux(FLAGS.build_dir, 'Dockerfile', dockerfileargmap, + backends, repoagents, endpoints) - log_verbose('buildbase container {}'.format(commonargs + cachefromargs)) - create_dockerfile_buildbase(FLAGS.build_dir, 'Dockerfile.buildbase', - dockerfileargmap) - try: - # Create buildbase image, this is an image with all - # dependencies needed for the build. - p = subprocess.Popen(commonargs + cachefromargs + - ['-t', 'tritonserver_buildbase', '.']) - p.wait() - fail_if(p.returncode != 0, 'docker build tritonserver_buildbase failed') + # Dockerfile used for the creating the CI base image. + create_dockerfile_cibase(FLAGS.build_dir, 'Dockerfile.cibase', + dockerfileargmap) - # Need to extract env from the base image so that we can - # access library versions. 
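With this refactor all three Dockerfiles are generated up front by a single call, instead of being interleaved with docker invocations as container_build() did. A hypothetical post-condition check (FLAGS.build_dir defaults to the build/ subdirectory of the server repo, as set later in main):

```python
# Hypothetical check: after create_build_dockerfiles() returns, all three
# generated Dockerfiles exist in the build directory.
import os

for name in ('Dockerfile.buildbase', 'Dockerfile', 'Dockerfile.cibase'):
    path = os.path.join('build', name)
    assert os.path.isfile(path), f'{path} was not generated'
```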
- buildbase_env_filepath = os.path.join(FLAGS.build_dir, 'buildbase_env') - with open(buildbase_env_filepath, 'w') as f: - if target_platform() == 'windows': - envargs = [ - 'docker', 'run', '--rm', 'tritonserver_buildbase', - 'cmd.exe', '/c', 'set' - ] - else: - envargs = [ - 'docker', 'run', '--rm', 'tritonserver_buildbase', 'env' - ] - log_verbose('buildbase env {}'.format(envargs)) - p = subprocess.Popen(envargs, stdout=f) - p.wait() - fail_if(p.returncode != 0, - 'extracting tritonserver_buildbase env failed') - - buildbase_env = {} - with open(buildbase_env_filepath, 'r') as f: - for line in f: - kv = line.strip().split('=', 1) - if len(kv) == 2: - key, value = kv - buildbase_env[key] = value - - # We set the following env in the build docker container - # launch below to pass necessary versions into the build. By - # prepending the envvars with TRITONBUILD_ prefix we indicate - # that the build.py execution within the container should set - # the corresponding variables in cmake invocation. - dockerrunenvargs = [] - for k in ['TRT_VERSION', 'DALI_VERSION']: - if k in buildbase_env: - dockerrunenvargs += [ - '--env', 'TRITONBUILD_{}={}'.format(k, buildbase_env[k]) - ] - - # Before attempting to run the new image, make sure any - # previous 'tritonserver_builder' container is removed. - client = docker.from_env(timeout=3600) - try: - existing = client.containers.get('tritonserver_builder') - existing.remove(force=True) - except docker.errors.NotFound: - pass # ignore +def create_docker_build_script(script_name, container_install_dir, + container_ci_dir): + with BuildScript( + os.path.join(FLAGS.build_dir, script_name), + verbose=FLAGS.verbose, + desc=('Docker-based build script for Triton Inference Server' + )) as docker_script: - # Next run build.py inside the container with the same flags - # as was used to run this instance, except: - # - # --no-container-build is added so that within the buildbase - # container we just created we do not attempt to do a nested - # container build # - # Add --version, --container-version and - # --upstream-container-version flags since they can be set - # automatically and so may not be in sys.argv + # Build base image... tritonserver_buildbase # - # --cmake-dir is overridden to 'cmake_dir' + docker_script.commentln(8) + docker_script.comment('Create Triton base build image') + docker_script.comment( + 'This image contains all dependencies necessary to build Triton') + docker_script.comment() + + cachefrommap = [ + 'tritonserver_buildbase', 'tritonserver_buildbase_cache0', + 'tritonserver_buildbase_cache1' + ] + + baseargs = [ + 'docker', 'build', '-t', 'tritonserver_buildbase', '-f', + os.path.join(FLAGS.build_dir, 'Dockerfile.buildbase') + ] + + if not FLAGS.no_container_pull: + baseargs += [ + '--pull', + ] + + # Windows docker runs in a VM and memory needs to be specified + # explicitly (at least for some configurations of docker). + if target_platform() == 'windows': + baseargs += ['--memory', FLAGS.container_memory] + + baseargs += ['--cache-from={}'.format(k) for k in cachefrommap] + baseargs += ['.'] + + docker_script.cwd(THIS_SCRIPT_DIR) + docker_script.cmd(baseargs, check_exitcode=True) + # - # --build-dir is added/overridden to 'build_dir' + # Build... 
# - # --install-dir is added/overridden to 'install_dir' + docker_script.blankln() + docker_script.commentln(8) + docker_script.comment('Run build in tritonserver_buildbase container') + docker_script.comment( + 'Mount a directory into the container where the install') + docker_script.comment('artifacts will be placed.') + docker_script.comment() + + # Don't use '-v' to communicate the built artifacts out of the + # build, because we want this code to work even if run within + # Docker (i.e. docker-in-docker) and not just if run directly + # from host. runargs = [ - 'python3', - './build.py', + 'docker', 'run', '-w', '/workspace/build', '--name', + 'tritonserver_builder' ] - runargs += sys.argv[1:] - runargs += [ - '--no-container-build', - ] - if FLAGS.version is not None: - runargs += ['--version', FLAGS.version] - if FLAGS.container_version is not None: - runargs += ['--container-version', FLAGS.container_version] - if FLAGS.upstream_container_version is not None: + + if not FLAGS.no_container_interactive: + runargs += ['-it'] + + if target_platform() == 'windows': + runargs += ['--memory', FLAGS.container_memory] runargs += [ - '--upstream-container-version', FLAGS.upstream_container_version + '-v', '\\\\.\pipe\docker_engine:\\\\.\pipe\docker_engine' ] + else: + runargs += ['-v', '/var/run/docker.sock:/var/run/docker.sock'] + + runargs += ['tritonserver_buildbase'] - runargs += ['--cmake-dir', cmake_dir] if target_platform() == 'windows': - runargs += ['--build-dir', os.path.normpath(build_dir)] + runargs += [ + 'powershell.exe', '-noexit', '-File', './cmake_build.ps1' + ] else: - runargs += ['--build-dir', build_dir] - runargs += ['--install-dir', install_dir] + runargs += ['./cmake_build'] - dockerrunargs = [ - 'docker', 'run', '--name', 'tritonserver_builder', '-w', - '/workspace' - ] + # Remove existing tritonserver_builder container... if target_platform() == 'windows': - # Windows docker runs in a VM and memory needs to be - # specified explicitly. - dockerrunargs += ['--memory', FLAGS.container_memory] - dockerrunargs += [ - '-v', '\\\\.\pipe\docker_engine:\\\\.\pipe\docker_engine' - ] + docker_script.cmd(['docker', 'rm', 'tritonserver_builder']) else: - dockerrunargs += ['-v', '/var/run/docker.sock:/var/run/docker.sock'] - dockerrunargs += dockerrunenvargs - dockerrunargs += [ - 'tritonserver_buildbase', + docker_script._file.write( + 'if [ "$(docker ps -a | grep tritonserver_builder)" ]; then docker rm tritonserver_builder; fi\n' + ) + + docker_script.cmd(runargs, check_exitcode=True) + + docker_script.cmd([ + 'docker', 'cp', 'tritonserver_builder:/tmp/tritonbuild/install', + FLAGS.build_dir + ], + check_exitcode=True) + docker_script.cmd([ + 'docker', 'cp', 'tritonserver_builder:/tmp/tritonbuild/ci', + FLAGS.build_dir + ], + check_exitcode=True) + + # + # Final image... tritonserver + # + docker_script.blankln() + docker_script.commentln(8) + docker_script.comment('Create final tritonserver image') + docker_script.comment() + + finalargs = [ + 'docker', 'build', '-t', 'tritonserver', '-f', + os.path.join(FLAGS.build_dir, 'Dockerfile'), '.' ] - dockerrunargs += runargs - log_verbose(dockerrunargs) - p = subprocess.Popen(dockerrunargs) - p.wait() - fail_if(p.returncode != 0, 'docker run tritonserver_builder failed') - - container = client.containers.get('tritonserver_builder') - - # It is possible to copy the install artifacts from the - # container at this point (and, for example put them in the - # specified install directory on the host). 
But for container - # build we just want to use the artifacts in the server base - # container which is created below. - #mkdir(FLAGS.install_dir) - #tarfilename = os.path.join(FLAGS.install_dir, 'triton.tar') - #install_tar, stat_tar = container.get_archive(install_dir) - #with open(tarfilename, 'wb') as taroutfile: - # for d in install_tar: - # taroutfile.write(d) - #untar(FLAGS.install_dir, tarfilename) - - # Build is complete, save the container as the - # tritonserver_build image. We must do this in two steps: + docker_script.cwd(THIS_SCRIPT_DIR) + docker_script.cmd(finalargs, check_exitcode=True) + # - # 1. Commit the container as image - # "tritonserver_builder_image". This image can't be used - # directly because it binds the /var/run/docker.sock mount - # and so you would need to always run with that mount - # specified... so it can be used this way but very - # inconvenient. + # CI base image... tritonserver_cibase # - # 2. Perform a docker build to create "tritonserver_build" - # from "tritonserver_builder_image" that is essentially - # identical but removes the mount. - try: - client.images.remove('tritonserver_builder_image', force=True) - except docker.errors.ImageNotFound: - pass # ignore - - container.commit('tritonserver_builder_image', 'latest') - container.remove(force=True) - - create_dockerfile_build(FLAGS.build_dir, 'Dockerfile.build', backends, - build_dir) - p = subprocess.Popen([ - 'docker', 'build', '-t', 'tritonserver_build', '-f', - os.path.join(FLAGS.build_dir, 'Dockerfile.build'), '.' - ]) - p.wait() - fail_if(p.returncode != 0, 'docker build tritonserver_build failed') + docker_script.blankln() + docker_script.commentln(8) + docker_script.comment('Create CI base image') + docker_script.comment() + + cibaseargs = [ + 'docker', 'build', '-t', 'tritonserver_cibase', '-f', + os.path.join(FLAGS.build_dir, 'Dockerfile.cibase'), '.' + ] - # Final base image... this is a multi-stage build that uses - # the install artifacts from the tritonserver_build - # container. 
- if target_platform() == 'windows': - create_dockerfile_windows(FLAGS.build_dir, 'Dockerfile', - dockerfileargmap, backends, repoagents, - build_dir) - else: - create_dockerfile_linux(FLAGS.build_dir, 'Dockerfile', - dockerfileargmap, backends, repoagents, - endpoints, build_dir) - p = subprocess.Popen([ - 'docker', 'build', '-f', - os.path.join(FLAGS.build_dir, 'Dockerfile') - ] + ['-t', 'tritonserver', '.']) - p.wait() - fail_if(p.returncode != 0, 'docker build tritonserver failed') + docker_script.cwd(THIS_SCRIPT_DIR) + docker_script.cmd(cibaseargs, check_exitcode=True) - except Exception as e: - logging.error(traceback.format_exc()) - fail('container build failed') +def core_build(cmake_script, repo_dir, cmake_dir, build_dir, install_dir, + components, backends): + repo_build_dir = os.path.join(build_dir, 'tritonserver', 'build') + repo_install_dir = os.path.join(build_dir, 'tritonserver', 'install') + + cmake_script.commentln(8) + cmake_script.comment('Triton core library and tritonserver executable') + cmake_script.comment() + cmake_script.mkdir(repo_build_dir) + cmake_script.cwd(repo_build_dir) + cmake_script.cmake( + core_cmake_args(components, backends, cmake_dir, repo_install_dir)) + cmake_script.makeinstall() + + if target_platform() == 'windows': + cmake_script.mkdir(os.path.join(install_dir, 'bin')) + cmake_script.cp( + os.path.join(repo_install_dir, 'bin', 'tritonserver.exe'), + os.path.join(install_dir, 'bin')) + cmake_script.cp( + os.path.join(repo_install_dir, 'bin', 'tritonserver.dll'), + os.path.join(install_dir, 'bin')) + else: + cmake_script.mkdir(os.path.join(install_dir, 'bin')) + cmake_script.cp(os.path.join(repo_install_dir, 'bin', 'tritonserver'), + os.path.join(install_dir, 'bin')) + cmake_script.mkdir(os.path.join(install_dir, 'lib')) + cmake_script.cp( + os.path.join(repo_install_dir, 'lib', 'libtritonserver.so'), + os.path.join(install_dir, 'lib')) + + cmake_script.mkdir(os.path.join(install_dir, 'include', 'triton')) + cmake_script.cpdir( + os.path.join(repo_install_dir, 'include', 'triton', 'core'), + os.path.join(install_dir, 'include', 'triton', 'core')) + + cmake_script.cp(os.path.join(repo_dir, 'LICENSE'), install_dir) + cmake_script.cp(os.path.join(repo_dir, 'TRITON_VERSION'), install_dir) -def build_backend(be, + # If requested, package the source code for all OSS used to build + # For windows, Triton is not delivered as a container so skip for + # windows platform. 
+ if target_platform() != 'windows': + if (not FLAGS.no_container_build) and (not FLAGS.no_core_build) and ( + not FLAGS.no_container_source): + cmake_script.mkdir(os.path.join(install_dir, 'third-party-src')) + cmake_script.cwd(repo_build_dir) + cmake_script.tar( + 'third-party-src', + os.path.join(install_dir, 'third-party-src', 'src.tar.gz')) + cmake_script.cp( + os.path.join(repo_dir, 'docker', 'README.third-party-src'), + os.path.join(install_dir, 'third-party-src', 'README')) + + cmake_script.comment() + cmake_script.comment('end Triton core library and tritonserver executable') + cmake_script.commentln(8) + cmake_script.blankln() + + +def backend_build(be, + cmake_script, tag, build_dir, install_dir, @@ -1494,31 +1475,210 @@ def build_backend(be, repo_build_dir = os.path.join(build_dir, be, 'build') repo_install_dir = os.path.join(build_dir, be, 'install') - mkdir(build_dir) - gitclone(build_dir, backend_repo(be), tag, be, github_organization) - mkdir(repo_build_dir) - cmake( - repo_build_dir, + cmake_script.commentln(8) + cmake_script.comment(f'\'{be}\' backend') + cmake_script.comment('Delete this section to remove backend from build') + cmake_script.comment() + cmake_script.mkdir(build_dir) + cmake_script.cwd(build_dir) + cmake_script.gitclone(backend_repo(be), tag, be, github_organization) + + cmake_script.mkdir(repo_build_dir) + cmake_script.cwd(repo_build_dir) + cmake_script.cmake( backend_cmake_args(images, components, be, repo_install_dir, library_paths, variant_index)) - makeinstall(repo_build_dir) + cmake_script.makeinstall() + + cmake_script.mkdir(os.path.join(install_dir, 'backends')) + cmake_script.rmdir(os.path.join(install_dir, 'backends', be)) + cmake_script.cpdir(os.path.join(repo_install_dir, 'backends', be), + os.path.join(install_dir, 'backends')) + + cmake_script.comment() + cmake_script.comment(f'end \'{be}\' backend') + cmake_script.commentln(8) + cmake_script.blankln() + + +def repo_agent_build(ra, cmake_script, build_dir, install_dir, repoagent_repo, + repoagents): + repo_build_dir = os.path.join(build_dir, ra, 'build') + repo_install_dir = os.path.join(build_dir, ra, 'install') + + cmake_script.commentln(8) + cmake_script.comment(f'\'{ra}\' repository agent') + cmake_script.comment( + 'Delete this section to remove repository agent from build') + cmake_script.comment() + cmake_script.mkdir(build_dir) + cmake_script.cwd(build_dir) + cmake_script.gitclone(repoagent_repo(ra), repoagents[ra], ra, + FLAGS.github_organization) + + cmake_script.mkdir(repo_build_dir) + cmake_script.cwd(repo_build_dir) + cmake_script.cmake( + repoagent_cmake_args(images, components, ra, repo_install_dir)) + cmake_script.makeinstall() + + cmake_script.mkdir(os.path.join(install_dir, 'repoagents')) + cmake_script.rmdir(os.path.join(install_dir, 'repoagents', ra)) + cmake_script.cpdir(os.path.join(repo_install_dir, 'repoagents', ra), + os.path.join(install_dir, 'repoagents')) + cmake_script.comment() + cmake_script.comment(f'end \'{ra}\' repository agent') + cmake_script.commentln(8) + cmake_script.blankln() + + +def cibase_build(cmake_script, repo_dir, cmake_dir, build_dir, install_dir, + ci_dir, backends): + repo_build_dir = os.path.join(build_dir, 'tritonserver', 'build') + repo_install_dir = os.path.join(build_dir, 'tritonserver', 'install') + + cmake_script.commentln(8) + cmake_script.comment('Collect Triton CI artifacts') + cmake_script.comment() + + cmake_script.mkdir(ci_dir) + + # On windows we are not yet using a CI/QA docker image for + # testing, so don't do anything... 
+ if target_platform() == 'windows': + return + + # The core build produces some artifacts that are needed for CI + # testing, so include those in the install. + cmake_script.cpdir(os.path.join(repo_dir, 'qa'), ci_dir) + cmake_script.cpdir(os.path.join(repo_dir, 'deploy'), ci_dir) + cmake_script.mkdir(os.path.join(ci_dir, 'docs')) + cmake_script.cpdir(os.path.join(repo_dir, 'docs', 'examples'), + os.path.join(ci_dir, 'docs')) + cmake_script.mkdir(os.path.join(ci_dir, 'src', 'test')) + cmake_script.cpdir(os.path.join(repo_dir, 'src', 'test', 'models'), + os.path.join(ci_dir, 'src', 'test')) + cmake_script.cpdir(os.path.join(repo_install_dir, 'bin'), ci_dir) + cmake_script.mkdir(os.path.join(ci_dir, 'lib')) + cmake_script.cp( + os.path.join(repo_install_dir, 'lib', + 'libtritonrepoagent_relocation.so'), + os.path.join(ci_dir, 'lib')) + + # Some of the backends are needed for CI testing + cmake_script.mkdir(os.path.join(ci_dir, 'backends')) + for be in ('identity', 'repeat', 'square'): + be_install_dir = os.path.join(build_dir, be, 'install', 'backends', be) + if target_platform() == 'windows': + cmake_script.cmd(f'if (Test-Path -Path {be_install_dir}) {{') + else: + cmake_script.cmd(f'if [[ -e {be_install_dir} ]]; then') + cmake_script.cpdir(be_install_dir, os.path.join(ci_dir, 'backends')) + cmake_script.cmd('}' if target_platform() == 'windows' else 'fi') + + # Some of the unit-test built backends are needed for CI testing + cmake_script.mkdir( + os.path.join(ci_dir, 'tritonbuild', 'tritonserver', 'backends')) + for be in ('query', 'implicit_state', 'sequence', 'dyna_sequence', + 'distributed_addsub'): + be_install_dir = os.path.join(repo_install_dir, 'backends', be) + if target_platform() == 'windows': + cmake_script.cmd(f'if (Test-Path -Path {be_install_dir}) {{') + else: + cmake_script.cmd(f'if [[ -e {be_install_dir} ]]; then') + cmake_script.cpdir( + be_install_dir, + os.path.join(ci_dir, 'tritonbuild', 'tritonserver', 'backends')) + cmake_script.cmd('}' if target_platform() == 'windows' else 'fi') + + # The onnxruntime_backend build produces some artifacts that + # are needed for CI testing. + if 'onnxruntime' in backends: + ort_install_dir = os.path.join(build_dir, 'onnxruntime', 'install') + cmake_script.mkdir(os.path.join(ci_dir, 'qa', 'L0_custom_ops')) + cmake_script.cp( + os.path.join(ort_install_dir, 'test', 'libcustom_op_library.so'), + os.path.join(ci_dir, 'qa', 'L0_custom_ops')) + cmake_script.cp( + os.path.join(ort_install_dir, 'test', 'custom_op_test.onnx'), + os.path.join(ci_dir, 'qa', 'L0_custom_ops')) + + # Need the build area for some backends so that they can be + # rebuilt with specific options. 
+ cmake_script.mkdir(os.path.join(ci_dir, 'tritonbuild')) + for be in ('identity', 'python'): + if be in backends: + cmake_script.rmdir(os.path.join(build_dir, be, 'build')) + cmake_script.rmdir(os.path.join(build_dir, be, 'install')) + cmake_script.cpdir(os.path.join(build_dir, be), + os.path.join(ci_dir, 'tritonbuild')) + + cmake_script.comment() + cmake_script.comment('end Triton CI artifacts') + cmake_script.commentln(8) + cmake_script.blankln() + + +def finalize_build(cmake_script, install_dir, ci_dir): + cmake_script.cmd(f'chmod -R a+rw {install_dir}') + cmake_script.cmd(f'chmod -R a+rw {ci_dir}') + + +def enable_all(): + if target_platform() != 'windows': + all_backends = [ + 'ensemble', 'identity', 'square', 'repeat', 'tensorflow1', + 'tensorflow2', 'onnxruntime', 'python', 'dali', 'pytorch', + 'openvino', 'fil', 'tensorrt' + ] + all_repoagents = ['checksum'] + all_filesystems = ['gcs', 's3', 'azure_storage'] + all_endpoints = ['http', 'grpc', 'sagemaker', 'vertex-ai'] + + FLAGS.enable_logging = True + FLAGS.enable_stats = True + FLAGS.enable_metrics = True + FLAGS.enable_gpu_metrics = True + FLAGS.enable_tracing = True + FLAGS.enable_nvtx = True + FLAGS.enable_gpu = True + else: + all_backends = [ + 'ensemble', 'identity', 'square', 'repeat', 'onnxruntime', + 'openvino', 'tensorrt' + ] + all_repoagents = ['checksum'] + all_filesystems = [] + all_endpoints = ['http', 'grpc'] - backend_install_dir = os.path.join(install_dir, 'backends', be) - rmdir(backend_install_dir) - mkdir(backend_install_dir) - cpdir(os.path.join(repo_install_dir, 'backends', be), backend_install_dir) + FLAGS.enable_logging = True + FLAGS.enable_stats = True + FLAGS.enable_tracing = True + FLAGS.enable_gpu = True + requested_backends = [] + for be in FLAGS.backend: + parts = be.split(':') + requested_backends += [parts[0]] + for be in all_backends: + if be not in requested_backends: + FLAGS.backend += [be] -def get_tagged_backend(be, version): - tagged_be = be - if be == 'openvino': - if version[0] == 'SPECIFIC': - tagged_be += "_" + version[1] - else: - tagged_be += "_" + version[0].replace('.', '_') - if version[1] and target_platform() != 'windows': - tagged_be += "_pre" - return tagged_be + requested_repoagents = [] + for ra in FLAGS.repoagent: + parts = ra.split(':') + requested_repoagents += [parts[0]] + for ra in all_repoagents: + if ra not in requested_repoagents: + FLAGS.repoagent += [ra] + + for fs in all_filesystems: + if fs not in FLAGS.filesystem: + FLAGS.filesystem += [fs] + + for ep in all_endpoints: + if ep not in FLAGS.endpoint: + FLAGS.endpoint += [ep] if __name__ == '__main__': @@ -1536,10 +1696,22 @@ def get_tagged_backend(be, version): required=False, help='Enable verbose output.') + parser.add_argument( + '--dryrun', + action="store_true", + required=False, + help='Output the build scripts, but do not perform build.') parser.add_argument('--no-container-build', action="store_true", required=False, help='Do not use Docker container for build.') + parser.add_argument( + '--no-container-interactive', + action="store_true", + required=False, + help= + 'Do not use -it argument to "docker run" when performing container build.' + ) parser.add_argument( '--no-container-pull', action="store_true", @@ -1577,7 +1749,7 @@ def get_tagged_backend(be, version): parser.add_argument( '--build-dir', type=str, - required=True, + required=False, help= 'Build directory. All repo clones and builds will be performed in this directory.' 
 )
@@ -1598,8 +1770,7 @@ def get_tagged_backend(be, version):
         required=False,
         default='/tmp',
         help=
-        'Temporary parent directory used for building inside docker. Default is /tmp.'
-    )
+        'Temporary directory used for building inside docker. Default is /tmp.')
     parser.add_argument(
         '--library-paths',
         action='append',
@@ -1672,6 +1843,13 @@ def get_tagged_backend(be, version):
         'Use specified Docker image in build as <image-name>,<full-image-name>. <image-name> can be "base", "gpu-base", "tensorflow1", "tensorflow2", or "pytorch".'
     )
+    parser.add_argument(
+        '--enable-all',
+        action="store_true",
+        required=False,
+        help=
+        'Enable all standard released Triton features, backends, repository agents, endpoints and file systems.'
+    )
     parser.add_argument('--enable-logging',
                         action="store_true",
                         required=False,
@@ -1818,27 +1996,50 @@ def get_tagged_backend(be, version):
     if FLAGS.extra_backend_cmake_arg is None:
         FLAGS.extra_backend_cmake_arg = []
 
-    if FLAGS.install_dir is None:
-        FLAGS.install_dir = os.path.join(FLAGS.build_dir, "opt", "tritonserver")
-
-    # FLAGS.cmake_dir defaults to the directory containing build.py.
-    if FLAGS.cmake_dir is None:
-        from inspect import getsourcefile
-        FLAGS.cmake_dir = SCRIPT_DIR
+    # if --enable-all is specified, then update FLAGS to enable all
+    # settings, backends, repo-agents, file systems, endpoints, etc.
+    if FLAGS.enable_all:
+        enable_all()
+
+    # When doing a docker build, --build-dir, --install-dir and
+    # --cmake-dir must not be set. We will use the build/ subdir
+    # within the server/ repo that contains this build.py script for
+    # --build-dir. If not doing a docker build, --build-dir must be
+    # set.
+    if FLAGS.no_container_build:
+        if FLAGS.build_dir is None:
+            fail('--no-container-build requires --build-dir')
+        if FLAGS.install_dir is None:
+            FLAGS.install_dir = os.path.join(FLAGS.build_dir, "opt",
+                                             "tritonserver")
+        if FLAGS.cmake_dir is None:
+            FLAGS.cmake_dir = THIS_SCRIPT_DIR
+    else:
+        if FLAGS.build_dir is not None:
+            fail('--build-dir must not be set for container-based build')
+        if FLAGS.install_dir is not None:
+            fail('--install-dir must not be set for container-based build')
+        if FLAGS.cmake_dir is not None:
+            fail('--cmake-dir must not be set for container-based build')
+        FLAGS.build_dir = os.path.join(THIS_SCRIPT_DIR, 'build')
 
     # Determine the versions. Start with Triton version, if --version
     # is not explicitly specified read from TRITON_VERSION file.
     if FLAGS.version is None:
-        with open(os.path.join(SCRIPT_DIR, 'TRITON_VERSION'), "r") as vfile:
+        with open(os.path.join(THIS_SCRIPT_DIR, 'TRITON_VERSION'),
+                  "r") as vfile:
             FLAGS.version = vfile.readline().strip()
 
+    if FLAGS.build_parallel is None:
+        FLAGS.build_parallel = multiprocessing.cpu_count() * 2
+
     log('Building Triton Inference Server')
     log('platform {}'.format(target_platform()))
     log('machine {}'.format(target_machine()))
     log('version {}'.format(FLAGS.version))
-    log('cmake dir {}'.format(FLAGS.cmake_dir))
     log('build dir {}'.format(FLAGS.build_dir))
     log('install dir {}'.format(FLAGS.install_dir))
+    log('cmake dir {}'.format(FLAGS.cmake_dir))
 
     # Determine the default repo-tag that should be used for images,
     # backends and repo-agents if a repo-tag is not given
- if not FLAGS.no_container_build: - FLAGS.container_version, FLAGS.upstream_container_version = get_container_versions( - FLAGS.version, FLAGS.container_version, - FLAGS.upstream_container_version) + FLAGS.container_version, FLAGS.upstream_container_version = container_versions( + FLAGS.version, FLAGS.container_version, + FLAGS.upstream_container_version) + + log('container version {}'.format(FLAGS.container_version)) + log('upstream container version {}'.format( + FLAGS.upstream_container_version)) - log('container version {}'.format(FLAGS.container_version)) - log('upstream container version {}'.format( - FLAGS.upstream_container_version)) + for ep in FLAGS.endpoint: + log(f'endpoint "{ep}"') + for fs in FLAGS.filesystem: + log(f'filesystem "{fs}"') # Initialize map of backends to build and repo-tag for each. backends = {} @@ -1965,24 +2170,6 @@ def get_tagged_backend(be, version): OVERRIDE_BACKEND_CMAKE_FLAGS[be] = {} OVERRIDE_BACKEND_CMAKE_FLAGS[be][parts[0]] = parts[1] - # If --container-build is specified then we perform the actual - # build within a build container and then from that create a - # tritonserver container holding the results of the build. - if not FLAGS.no_container_build: - import docker - - container_build(images, backends, repoagents, FLAGS.endpoint) - sys.exit(0) - - # If there is a container pre-build command assume this invocation - # is being done within the build container and so run the - # pre-build command. - if (FLAGS.container_prebuild_command): - prebuild_command() - - if FLAGS.build_parallel is None: - FLAGS.build_parallel = multiprocessing.cpu_count() * 2 - # Initialize map of common components and repo-tag for each. components = { 'common': default_repo_tag, @@ -2003,71 +2190,133 @@ def get_tagged_backend(be, version): for c in components: log('component "{}" at tag/branch "{}"'.format(c, components[c])) - # Build the core shared library and the server executable. - if not FLAGS.no_core_build: - repo_build_dir = os.path.join(FLAGS.build_dir, 'tritonserver', 'build') - repo_install_dir = os.path.join(FLAGS.build_dir, 'tritonserver', - 'install') - - mkdir(repo_build_dir) - cmake(repo_build_dir, - core_cmake_args(components, backends, repo_install_dir)) - makeinstall(repo_build_dir) - - core_install_dir = FLAGS.install_dir - mkdir(core_install_dir) - cpdir(repo_install_dir, core_install_dir) - - # Build each backend... - for be in backends: - # Core backends are not built separately from core so skip... - if (be in CORE_BACKENDS): - continue - - tagged_be_list = [] - if (be == 'openvino'): - tagged_be_list.append( - get_tagged_backend(be, TRITON_VERSION_MAP[FLAGS.version][4][0])) - if (FLAGS.build_multiple_openvino): - skip = True - for ver in TRITON_VERSION_MAP[FLAGS.version][4]: - if not skip: - tagged_be_list.append(get_tagged_backend(be, ver)) - skip = False - # If armnn_tflite backend, source from external repo for git clone - if be == 'armnn_tflite': - github_organization = 'https://gitlab.com/arm-research/smarter/' + # Set the build, install, and cmake directories to use for the + # generated build scripts and Dockerfiles. If building without + # Docker, these are the directories specified on the cmdline. If + # building with Docker, we change these to be directories within + # FLAGS.tmp_dir inside the Docker container. 
+ script_repo_dir = THIS_SCRIPT_DIR + script_build_dir = FLAGS.build_dir + script_install_dir = script_ci_dir = FLAGS.install_dir + script_cmake_dir = FLAGS.cmake_dir + if not FLAGS.no_container_build: + # FLAGS.tmp_dir may be specified with "\" on Windows, adjust + # to "/" for docker usage. + script_build_dir = os.path.normpath( + os.path.join(FLAGS.tmp_dir, 'tritonbuild').replace("\\", "/")) + script_install_dir = os.path.normpath( + os.path.join(script_build_dir, 'install')) + script_ci_dir = os.path.normpath(os.path.join(script_build_dir, 'ci')) + if target_platform() == 'windows': + script_repo_dir = script_cmake_dir = os.path.normpath( + 'c:/workspace') else: - github_organization = FLAGS.github_organization + script_repo_dir = script_cmake_dir = '/workspace' + + script_name = 'cmake_build' + if target_platform() == 'windows': + script_name += '.ps1' + + # Write the build script that invokes cmake for the core, backends, and repo-agents. + pathlib.Path(FLAGS.build_dir).mkdir(parents=True, exist_ok=True) + with BuildScript( + os.path.join(FLAGS.build_dir, script_name), + verbose=FLAGS.verbose, + desc=('Build script for Triton Inference Server')) as cmake_script: + + # Run the container pre-build command if the cmake build is + # being done within the build container. + if not FLAGS.no_container_build and FLAGS.container_prebuild_command: + cmake_script.cmd(FLAGS.container_prebuild_command, + check_exitcode=True) + cmake_script.blankln() + + # Commands to build the core shared library and the server executable. + if not FLAGS.no_core_build: + core_build(cmake_script, script_repo_dir, script_cmake_dir, + script_build_dir, script_install_dir, components, + backends) + + # Commands to build each backend... + for be in backends: + # Core backends are not built separately from core so skip... + if (be in CORE_BACKENDS): + continue + + tagged_be_list = [] + if (be == 'openvino'): + tagged_be_list.append( + tagged_backend(be, TRITON_VERSION_MAP[FLAGS.version][4][0])) + if (FLAGS.build_multiple_openvino): + skip = True + for ver in TRITON_VERSION_MAP[FLAGS.version][4]: + if not skip: + tagged_be_list.append(tagged_backend(be, ver)) + skip = False + + # If armnn_tflite backend, source from external repo for git clone + if be == 'armnn_tflite': + github_organization = 'https://gitlab.com/arm-research/smarter/' + else: + github_organization = FLAGS.github_organization - if not tagged_be_list: - build_backend(be, backends[be], FLAGS.build_dir, FLAGS.install_dir, - github_organization, images, components, - library_paths) + if not tagged_be_list: + backend_build(be, cmake_script, backends[be], script_build_dir, + script_install_dir, github_organization, images, + components, library_paths) + else: + variant_index = 0 + for tagged_be in tagged_be_list: + backend_build(tagged_be, cmake_script, backends[be], + script_build_dir, script_install_dir, + github_organization, images, components, + library_paths, variant_index) + variant_index += 1 + + # Commands to build each repo agent... + for ra in repoagents: + repo_agent_build(ra, cmake_script, script_build_dir, + script_install_dir, repoagent_repo, repoagents) + + # Commands needed only when building with Docker... + if not FLAGS.no_container_build: + # Commands to collect all the build artifacts needed for CI + # testing. 
+            cibase_build(cmake_script, script_repo_dir, script_cmake_dir,
+                         script_build_dir, script_install_dir, script_ci_dir,
+                         backends)
+
+            # When building with Docker the install and ci artifacts
+            # written to the build-dir while running the docker container
+            # may have root ownership, so give them permissions to be
+            # managed by all users on the host system.
+            if target_platform() != 'windows':
+                finalize_build(cmake_script, script_install_dir,
+                               script_ci_dir)
+
+    # If --no-container-build is not specified then we perform the
+    # actual build within a docker container and from that create the
+    # final tritonserver docker image. For the build we need to
+    # generate a few Dockerfiles and a top-level script that drives
+    # the build process.
+    if not FLAGS.no_container_build:
+        script_name = 'docker_build'
+        if target_platform() == 'windows':
+            script_name += '.ps1'
+
+        create_build_dockerfiles(script_build_dir, images, backends,
+                                 repoagents, FLAGS.endpoint)
+        create_docker_build_script(script_name, script_install_dir,
+                                   script_ci_dir)
+
+    # If not a dry-run, execute the script to perform the build... If a
+    # container-based build is requested use 'docker_build' script,
+    # otherwise build directly on this system using cmake script.
+    if not FLAGS.dryrun:
+        if target_platform() == 'windows':
+            p = subprocess.Popen(
+                ['powershell.exe', '-noexit', '-File', f'./{script_name}'],
+                cwd=FLAGS.build_dir)
         else:
-            variant_index = 0
-            for tagged_be in tagged_be_list:
-                build_backend(tagged_be, backends[be], FLAGS.build_dir,
-                              FLAGS.install_dir, github_organization, images,
-                              components, library_paths, variant_index)
-                variant_index += 1
-
-    # Build each repo agent...
-    for ra in repoagents:
-        repo_build_dir = os.path.join(FLAGS.build_dir, ra, 'build')
-        repo_install_dir = os.path.join(FLAGS.build_dir, ra, 'install')
-
-        mkdir(FLAGS.build_dir)
-        gitclone(FLAGS.build_dir, repoagent_repo(ra), repoagents[ra], ra,
-                 FLAGS.github_organization)
-        mkdir(repo_build_dir)
-        cmake(repo_build_dir,
-              repoagent_cmake_args(images, components, ra, repo_install_dir))
-        makeinstall(repo_build_dir)
-
-        repoagent_install_dir = os.path.join(FLAGS.install_dir, 'repoagents',
-                                             ra)
-        rmdir(repoagent_install_dir)
-        mkdir(repoagent_install_dir)
-        cpdir(os.path.join(repo_install_dir, 'repoagents', ra),
-              repoagent_install_dir)
+            p = subprocess.Popen([f'./{script_name}'], cwd=FLAGS.build_dir)
+        p.wait()
+        fail_if(p.returncode != 0, 'build failed')
diff --git a/compose.py b/compose.py
index b5478298fd..e3e3b68929 100644
--- a/compose.py
+++ b/compose.py
@@ -107,7 +107,7 @@ def add_requested_backends(ddir, dockerfile_name, backends):
         if backend == 'openvino':
             import build
             ver = next(iter(build.TRITON_VERSION_MAP.values()))
-            backend = build.get_tagged_backend(backend, ver[4][0])
+            backend = build.tagged_backend(backend, ver[4][0])
         df += '''COPY --chown=1000:1000 --from=full /opt/tritonserver/backends/{} /opt/tritonserver/backends/{}
 '''.format(backend, backend)
     if len(backends) > 0:
diff --git a/docs/build.md b/docs/build.md
index 3c4adcb64f..7039f217d9 100644
--- a/docs/build.md
+++ b/docs/build.md
@@ -28,22 +28,32 @@
 # Building Triton
 
-This section gives an overview of how to build the Triton server. For
+This section describes how to build the Triton server from source. For
 information on building the Triton client libraries and examples see
 [Client Libraries and
 Examples](https://github.com/triton-inference-server/client). For
-information on building the Triton SDK container see
-[Build SDK Image](test.md#build-sdk-image).
+information on building the Triton SDK container see [Build SDK +Image](test.md#build-sdk-image). For information on testing your +Triton build see [Testing Triton](test.md). You can create a customized Triton Docker image that contains a subset -of the released backends without building. For example, you may want a -Triton image that contains only the TensorRT and Python backends. For -this type of customization you don't need to build Triton from source -and instead can use [the *compose* utility](compose.md). - -Triton server is built using the [build.py](../build.py) script. The -build.py script currently supports building for the following -platforms. See [Building on Unsupported +of the released backends without building from source. For example, +you may want a Triton image that contains only the TensorRT and Python +backends. For this type of customization you don't need to build +Triton from source and instead can use [the *compose* +utility](compose.md). + +The Triton source is distributed across multiple GitHub repositories +that together can be built and installed to create a complete Triton +installation. Triton server is built using CMake and (optionally) +Docker. To simplify the build process, Triton provides a +[build.py](../build.py) script. The build.py script will generate the +CMake and Docker build steps required to build Triton, and will +optionally invoke those steps or leave the invocation to you, as +described below. + +The build.py script currently supports building Triton for the +following platforms. See [Building on Unsupported Platforms](#building-on-unsupported-platforms) if you are attempting to build Triton on a platform that is not listed here. @@ -63,69 +73,121 @@ non-Docker build. * [Build without Docker](#ubuntu-without-docker). -### Building with Docker +### Building With Docker The easiest way to build Triton is to use Docker. The result of the build will be a Docker image called *tritonserver* that will contain the tritonserver executable in /opt/tritonserver/bin and the required -shared libraries in /opt/tritonserver/lib. The backends built for -Triton will be in /opt/tritonserver/backends. +shared libraries in /opt/tritonserver/lib. The backends and +repository-agents built for Triton will be in +/opt/tritonserver/backends and /opt/tritonserver/repoagents, +respectively. -The first step for any build is to checkout the +The first step for the build is to clone the [triton-inference-server/server](https://github.com/triton-inference-server/server) repo branch for the release you are interested in building (or the *main* branch to build from the development branch). Then run build.py as described below. The build.py script performs these steps when building with Docker. -* Fetch the appropriate minimal/base image. When building with GPU - support (--enable-gpu), the *min* image is the \-py3-min - image pulled from [NGC](https://ngc.nvidia.com) that contains the - CUDA, cuDNN, TensorRT and other dependencies that are required to - build Triton. When building without GPU support, the *min* image is - the standard ubuntu:20.04 image. +* In the *build* subdirectory of the server repo, generate the + docker_build script, the cmake_build script and the Dockerfiles + needed to build Triton. If you use the --dryrun flag, build.py will + stop here so that you can examine these files. + +* Run the docker_build script to perform the Docker-based build. The + docker_build script performs the following steps. 
+
+  * Build the *tritonserver_buildbase* Docker image that collects all
+    the build dependencies needed to build Triton. The
+    *tritonserver_buildbase* image is based on a minimal/base
+    image. When building with GPU support (--enable-gpu), the *min*
+    image is the
+    [\<xx.yy\>-py3-min](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/tritonserver)
+    image pulled from [NGC](https://ngc.nvidia.com) that contains the
+    CUDA, cuDNN, TensorRT and other dependencies that are required to
+    build Triton. When building without GPU support, the *min* image
+    is the standard ubuntu:20.04 image.
+
+  * Run the cmake_build script within the *tritonserver_buildbase*
+    image to actually build Triton. The cmake_build script performs
+    the following steps.
+
+    * Invoke CMake in the server repo to build Triton's core shared
+      library and *tritonserver* executable.
+
+    * Clone each requested backend and build it using CMake. For
+      example, the ONNX Runtime backend is built using
+      [triton-inference-server/onnxruntime_backend/CMakeLists.txt](https://github.com/triton-inference-server/onnxruntime_backend/blob/main/CMakeLists.txt). Some
+      of the backends may use Docker as part of their build (for
+      example [ONNX
+      Runtime](https://github.com/triton-inference-server/onnxruntime_backend)
+      and
+      [OpenVINO](https://github.com/triton-inference-server/openvino_backend)). If
+      you don't want to use Docker in those cases you must consult the
+      build process for those backends.
+
+    * Clone each repository agent and build it using the CMake file
+      from the corresponding repo. For example, the
+      [Checksum](https://github.com/triton-inference-server/checksum_repository_agent)
+      repository agent is built using
+      [triton-inference-server/checksum_repository_agent/CMakeLists.txt](https://github.com/triton-inference-server/checksum_repository_agent/blob/main/CMakeLists.txt).
+
+  * Copy the built artifacts out of the container and into the build
+    subdirectory on the host system.
+
+  * Create the final *tritonserver* Docker image that contains the
+    libraries, executables and other artifacts from the build.
+
+  * Create a *tritonserver_cibase* Docker image that contains the QA
+    artifacts needed for testing, as described in [Testing
+    Triton](test.md).
 
-* Create a *tritonserver_buildbase* Docker image that adds additional
-  build dependencies to the *min* image.
+By default, build.py does not enable any of Triton's optional features
+but you can enable all features, backends, and repository agents with
+the --enable-all flag. The -v flag turns on verbose output.
 
-* Run build.py within the *tritonserver_buildbase* image to actually
-  build Triton. See [Build without Docker](#ubuntu-without-docker) for
-  more details on this part of the build process. The result of this
-  step is a *tritonserver_build* image that contains the built Triton
-  artifacts.
+```bash
+./build.py -v --enable-all
+```
 
-* Create the final *tritonserver* Docker image by extracting the
-  appropriate libraries, executables and other artifacts from
-  *tritonserver_build*.
+If you want to enable only certain Triton features, backends and
+repository agents, do not specify --enable-all. Instead you must
+specify the individual flags as documented by --help.
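+
+As an illustration, a hypothetical invocation that enables only a few
+features, two endpoints and a single backend might look like the
+following sketch (all flag names are taken from build.py --help;
+adjust the set to your needs):
+
+```bash
+./build.py -v --enable-logging --enable-stats --enable-metrics \
+    --enable-gpu --endpoint=http --endpoint=grpc \
+    --backend=ensemble --backend=onnxruntime
+```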
+#### Building With Specific GitHub Branches
+
+As described above, the build is performed in the server repo, but
+source from several other repos is fetched during the build
+process. Typically you do not need to specify anything about these
+other repos, but if you want to control which branch is used in these
+other repos you can as shown in the following example.
 
 ```bash
-./build.py --cmake-dir=<path/to/repo>/build --build-dir=/tmp/citritonbuild --enable-logging --enable-stats --enable-tracing --enable-metrics --enable-gpu-metrics --enable-gpu --filesystem=gcs --filesystem=azure_storage --filesystem=s3 --endpoint=http --endpoint=grpc --repo-tag=common:<container tag> --repo-tag=core:<container tag> --repo-tag=backend:<container tag> --repo-tag=thirdparty:<container tag> --backend=ensemble --backend=tensorrt:<container tag> --backend=identity:<container tag> --backend=repeat:<container tag> --backend=square:<container tag> --backend=onnxruntime:<container tag> --backend=pytorch:<container tag> --backend=tensorflow1:<container tag> --backend=tensorflow2:<container tag> --backend=openvino:<container tag> --backend=python:<container tag> --backend=dali:<container tag> --backend=fil:<container tag> --repoagent=checksum:<container tag>
+./build.py ... --repo-tag=common:<container tag> --repo-tag=core:<container tag> --repo-tag=backend:<container tag> --repo-tag=thirdparty:<container tag> ... --backend=tensorrt:<container tag> ... --repoagent=checksum:<container tag> ...
 ```
 
-If you are building on *main* branch then `<container tag>` will
-default to "main". If you are building on a release branch then
-`<container tag>` will default to the branch name. For example, if you
-are building on the r22.03 branch, `<container tag>` will default to
-r22.03. Therefore, you typically do not need to provide `<container
-tag>` at all (nor the preceding colon). You can use a different
-`<container tag>` for a component to instead use the corresponding
-branch/tag in the build. For example, if you have a branch called
-"mybranch" in the
-[identity_backend](https://github.com/triton-inference-server/identity_backend)
+If you are building on a release branch then `<container tag>` will
+default to the branch name. For example, if you are building on the
+r22.03 branch, `<container tag>` will default to r22.03. If you are
+building on any other branch (including the *main* branch) then
+`<container tag>` will default to "main". Therefore, you typically do
+not need to provide `<container tag>` at all (nor the preceding
+colon). You can use a different `<container tag>` for a component to
+instead use the corresponding branch/tag in the build. For example, if
+you have a branch called "mybranch" in the
+[onnxruntime_backend](https://github.com/triton-inference-server/onnxruntime_backend)
 repo that you want to use in the build, you would specify
---backend=identity:mybranch.
+--backend=onnxruntime:mybranch.
 
-#### CPU-only container
+#### CPU-Only Build
 
-If you want to build without GPU support remove the `--enable-gpu` and
+If you want to build without GPU support you must specify individual
+feature flags and not include the `--enable-gpu` and
 `--enable-gpu-metrics` flags. Only the following backends are
 available for a non-GPU / CPU-only build: `identity`, `repeat`,
-`square`, `tensorflow2`, `onnxruntime`, `openvino`, `python` and
-`fil`.
+`square`, `tensorflow2`, `pytorch`, `onnxruntime`, `openvino`,
+`python` and `fil`.
 
 To include the TensorFlow2 backend in your CPU-only build, you must
 provide this additional flag to build.py:
@@ -136,68 +198,52 @@ flag. This is needed since the CPU-only builds of the TensorFlow and
 PyTorch backends require some CUDA stubs and runtime dependencies that
 are not present in the CPU-only base container.
 
-### Building without Docker
+### Building Without Docker
 
 To build Triton without using Docker you must install the build
 dependencies that are handled automatically when building with Docker.
-The building with GPU support (--enable-gpu), these dependencies
-include [CUDA and cuDNN](#cuda-cublas-cudnn) and
-[TensorRT](#tensorrt). For both GPU and CPU-only builds the
-dependencies also include those listed in the
-create_dockerfile_buildbase() function of [build.py](../build.py).
+
+The first step for the build is to clone the
+[triton-inference-server/server](https://github.com/triton-inference-server/server)
+repo branch for the release you are interested in building (or the
+*main* branch to build from the development branch).
+
+To determine what dependencies are required by the build, run build.py
+with the --dryrun flag, and then look in the build subdirectory at
+Dockerfile.buildbase.
+
+```bash
+./build.py -v --enable-all --dryrun
+```
+
+From Dockerfile.buildbase you can see what dependencies you need to
+install on your host system. Note that when building with --enable-gpu
+(or --enable-all), Dockerfile.buildbase depends on the
+[\<xx.yy\>-py3-min](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/tritonserver)
+image pulled from [NGC](https://ngc.nvidia.com). Unfortunately, a
+Dockerfile is not currently available for the
+[\<xx.yy\>-py3-min](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/tritonserver)
+image. Instead, you must manually install [CUDA and
+cuDNN](#cuda-cublas-cudnn) and [TensorRT](#tensorrt) dependencies as
+described below.
 
 Once you have installed these dependencies on your build system you
 can then use build.py with the --no-container-build flag to build
-Triton. See the build.py invocation in [Build using
-Docker](#ubuntu-docker) for an example of how to run build.py. You can
-use that same invocation with the --no-container-build flag to build
-without Docker.
+Triton.
 
-The first step for any build is to checkout the
-[triton-inference-server/server](https://github.com/triton-inference-server/server)
-repo branch for the release you are interested in building (or the
-*main* branch to build from the development branch). Then run build.py
-as described below. The build.py script will perform the following
-steps (note that if you are building with Docker that these same steps
-will be performed during the Docker build within the
-*tritonserver_build* container).
-
-* Use the CMake files in [build](../build) to build Triton's core
-  shared library and *tritonserver* executable.
-
-* Fetch each requested backend and build it using the CMake file from
-  the corresponding backend repo. For example, the ONNX Runtime
-  backend is built using
-  [triton-inference-server/onnxruntime_backend/CMakeLists.txt](https://github.com/triton-inference-server/onnxruntime_backend/blob/main/CMakeLists.txt). Some
-  of the backends may use Docker as part of their build (for example
-  [ONNX
-  Runtime](https://github.com/triton-inference-server/onnxruntime_backend)
-  and
-  [OpenVINO](https://github.com/triton-inference-server/openvino_backend)). If
-  you don't want to use Docker in those cases you must consult the
-  build process for those backends.
-
-* Fetch each repository agent and build it using the CMake file from
-  the corresponding repo. For example, the
-  [Checksum](https://github.com/triton-inference-server/checksum_repository_agent)
-  repository agent is built using
-  [triton-inference-server/checksum_repository_agent/CMakeLists.txt](https://github.com/triton-inference-server/checksum_repository_agent/blob/main/CMakeLists.txt).
-
-By default build.py clones Triton repos from
-<https://github.com/triton-inference-server>. Use the
---github-organization options to select a different URL.
-
-The backends can also be built independently in each of the backend
-repositories. See the [backend
-repo](https://github.com/triton-inference-server/backend) for more
-information.
+```bash
+./build.py -v --no-container-build --build-dir=`pwd`/build --enable-all
+```
+
+See [Building with Docker](#ubuntu-docker) for more details on how the
+cmake_build script is used to perform the build.
 
 #### CUDA, cuBLAS, cuDNN
 
 For Triton to support NVIDIA GPUs you must install CUDA, cuBLAS and
-cuDNN. These libraries must be installed on system include and library
-paths so that they are available for the build. The version of the
-libraries used for a given release can be found in the [Framework
+cuDNN. These libraries must be installed on the system include and
+library paths so that they are available for the build. The version of
+the libraries used for a given release can be found in the [Framework
 Containers Support
 Matrix](https://docs.nvidia.com/deeplearning/frameworks/support-matrix/index.html).
 
@@ -207,10 +253,10 @@ execution issues since non-supported versions are not tested.
 
 #### TensorRT
 
-The TensorRT includes and libraries must be installed on system
-include and library paths so that they are available for the
-build. The version of TensorRT used in a given release can be found in
-the [Framework Containers Support
+The TensorRT headers and libraries must be installed on system include
+and library paths so that they are available for the build. The
+version of TensorRT used in a given release can be found in the
+[Framework Containers Support
 Matrix](https://docs.nvidia.com/deeplearning/frameworks/support-matrix/index.html).
 
 For a given version of Triton you can attempt to build with
@@ -225,27 +271,24 @@ issues since non-supported versions are not tested.
 
 For Windows 10, build.py supports both a Docker build and a non-Docker
 build in a similar way as described for [Ubuntu](#ubuntu). The primary
-difference is that the \<xx.yy\>-py3-min image used as the base of the
-Ubuntu Docker build is not available for Windows and so you must
-generated it yourself, as described below. For a non-Docker build you
-must install the dependencies contained in this base Dockerfile on
-your build system.
+difference is that the minimal/base image used as the base of the
+Dockerfile.buildbase image can be built from the provided
+[Dockerfile.win10.min](../Dockerfile.win10.min) file as described in
+[Windows 10 "Min" Image](#windows-10-min-image). When running build.py
+use the --image flag to specify the tag that you assigned to this
+image. For example, --image=base,win10-py3-min.
 
 ### Windows and Docker
 
 Depending on your version of Windows 10 and your version of Docker you
 may need to perform these additional steps before any of the following
-steps.
+build steps.
 
 * Set your Docker to work with "Windows containers". Right click on
   the whale icon in the lower-right status area and select "Switch to
   Windows containers".
 
-* When running "docker build" or "docker run" you may need to specify
-  '--network="Default Switch"' if you see errors like "remote name
-  could not be resolved".
-
-### Windows 10 "Min" Container
+### Windows 10 "Min" Image
 
 The "min" container describes the base dependencies needed to perform
 the Windows build. The Windows min container is
@@ -273,29 +316,9 @@ docker build -t win10-py3-min -f Dockerfile.win10.min .
 
 ### Build Triton Server
 
-Triton is built using the build.py script. The build system must have
-Docker, Python3 (plus pip installed *docker* module) and git installed
-so that it can execute build.py and perform a docker build. By
-default, build.py does not enable any of Triton's optional features
-and so you must enable them explicitly. The following build.py
-invocation builds all features and backends available on windows.
-
-```bash
-python build.py --cmake-dir=<path/to/repo>/build --build-dir=/tmp/citritonbuild --no-container-pull --image=base,win10-py3-min --enable-logging --enable-stats --enable-tracing --enable-gpu --endpoint=grpc --endpoint=http --repo-tag=common:<container tag> --repo-tag=core:<container tag> --repo-tag=backend:<container tag> --repo-tag=thirdparty:<container tag> --backend=ensemble --backend=tensorrt:<container tag> --backend=onnxruntime:<container tag> --backend=openvino:<container tag>
-```
-
-If you are building on *main* branch then '<container tag>' will
-default to "main". If you are building on a release branch then
-'<container tag>' will default to the branch name. For example, if you
-are building on the r22.03 branch, '<container tag>' will default to
-r22.03. Therefore, you typically do not need to provide '<container
-tag>' at all (nor the preceding colon). You can use a different
-'<container tag>' for a component to instead use the corresponding
-branch/tag in the build. For example, if you have a branch called
-"mybranch" in the
-[onnxruntime_backend](https://github.com/triton-inference-server/onnxruntime_backend)
-repo that you want to use in the build, you would specify
---backend=onnxruntime:mybranch.
+The Windows build system must have Docker, Python3 and git
+installed. See [Building with Docker](#ubuntu-docker) for more details
+on how to run build.py.
 
 ### Extract Build Artifacts
 
@@ -305,33 +328,44 @@ artifacts. Windows containers do not support GPU access so you likely
 want to extract the necessary files from the tritonserver image and
 run them directly on your host system. All the Triton artifacts can be
 found in /opt/tritonserver directory of the tritonserver image. Your
-host system will need to install the same CUDA, cuDNN and TensorRT
-versions that were used for the build.
+host system will need to install the CUDA, cuDNN, TensorRT and other
+dependencies that were used for the build.
 
 ## Building on Unsupported Platforms
 
 Building for an unsupported OS and/or hardware platform is
-possible. All of the build scripting and CMake files are included in
-the public repos. However, due to differences in compilers, libraries,
+possible. All of the build scripting, Dockerfiles and CMake
+invocations are included in the public repos or are generated by
+build.py as described in [Building with Docker](#ubuntu-docker). From
+these files you can find the required dependencies and CMake
+invocations. However, due to differences in compilers, libraries,
 package management, etc. you may have to make changes in the build
-scripts, CMake files and the source code.
+scripts, Dockerfiles, CMake files and the source code.
+
+To see the generated build scripts and Dockerfiles referred to below,
+use:
+
+```bash
+./build.py -v --enable-all --dryrun
+```
 
 You should familiarize yourself with the build process for supported
 platforms by reading the above documentation and then follow the
 process for the supported platform that most closely matches the
 platform you are interested in (for example, if you are trying to
 build for RHEL/x86-64 then follow the [Building for Ubuntu
-20.04](#building-for-ubuntu-2004) process. You will likely need to make
-changes in the following areas.
+20.04](#building-for-ubuntu-2004) process). You will likely need to
+make changes in the following areas and then manually run docker_build
+and cmake_build or the equivalent commands to perform a build.
 
-* The build.py script installs dependencies for the build using
+* The generated Dockerfiles install dependencies for the build using
   platform-specific packaging tools, for example, apt-get for
   Ubuntu. You will need to change build.py to use the packaging tool
   appropriate for your platform.
 
 * The package and library names for your platform may differ from
-  those used by build.py when installing dependencies. You will need
-  to find the corresponding packages on libraries on your platform.
+  those used by the generated Dockerfiles. You will need to find the
+  corresponding packages and libraries on your platform.
 
 * Your platform may use a different compiler or compiler version than
   the supported platforms. As a result you may encounter build errors
@@ -340,7 +374,7 @@ changes in the following areas.
 
 * Triton depends on a large number of open-source packages that it
   builds from source. If one of these packages does not support your
-  platform them you may need to disable the Triton feature that
+  platform then you may need to disable the Triton feature that
   depends on that package. For example, Triton supports the S3
   filesystem by building the aws-sdk-cpp package. If aws-sdk-cpp
   doesn't build for your platform then you can remove the need for
@@ -352,12 +386,12 @@ changes in the following areas.
   [TensorFlow](https://github.com/triton-inference-server/tensorflow_backend)
   backend extracts pre-built shared libraries from the TensorFlow NGC
   container as part of the build. This container is only available for
-  Ubuntu-20.04 / x86-64 and so if you require the TensorFlow backend
-  for your platform you will need download the TensorFlow container
-  and modify its build to produce shared libraries for your
-  platform. You must use the TensorFlow source and build scripts from
-  within the NGC container because they contain Triton-specific
-  patches that are required for the Triton TensorFlow backend.
+  Ubuntu-20.04 / x86-64, so if you require the TensorFlow backend for
+  your platform you will need to download the TensorFlow container and
+  modify its build to produce shared libraries for your platform. You
+  must use the TensorFlow source and build scripts from within the NGC
+  container because they contain Triton-specific patches that are
+  required for the Triton TensorFlow backend.
 
 * By default, the
   [PyTorch](https://github.com/triton-inference-server/pytorch_backend)
@@ -368,6 +402,6 @@ changes in the following areas.
 
 ## Building with Debug Symbols
 
-To build with Debug symbols, use the --build-type=Debug arguement while
-launching build.py. You can then launch the built server with gdb and see
-the debug symbols/information in the gdb trace.
+To build with Debug symbols, use the --build-type=Debug argument while
+launching build.py. You can then launch the built server with gdb and
+see the debug symbols/information in the gdb trace.
diff --git a/docs/test.md b/docs/test.md
index 10fcf269fb..fd201aa832 100644
--- a/docs/test.md
+++ b/docs/test.md
@@ -74,7 +74,7 @@
 Next you need to build a QA version of the Triton Docker image. This
 image will contain Triton, the QA tests, and all the dependencies
 needed to run the QA tests. First do a [Docker image
 build](build.md#building-triton-with-docker) to produce the
-*tritonserver_build* and *tritonserver* images.
+*tritonserver_cibase* and *tritonserver* images.
 
 Then, build the actual QA image.
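+
+For reference, building the QA image is typically a docker build using
+the Dockerfile.QA at the root of the server repo (a sketch assuming
+the default tritonserver_cibase and tritonserver image names produced
+by the build above):
+
+```bash
+docker build -t tritonserver_qa -f Dockerfile.QA .
+```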
@@ -129,4 +129,6 @@ $ BACKENDS="plan" ENSEMBLES=0 EXPECTED_NUM_TESTS=<expected> bash -x ./test.sh
 ```
 
 Where '<expected>' is the number of sub-tests expected to be run for
-just TensorRT testing and no ensembles.
+just TensorRT testing and no ensembles. Depending on which backend(s)
+you are testing you will need to experiment and determine the correct
+value for '<expected>'.
diff --git a/qa/common/check_copyright.py b/qa/common/check_copyright.py
index 8948809b8c..440e54899e 100755
--- a/qa/common/check_copyright.py
+++ b/qa/common/check_copyright.py
@@ -36,6 +36,7 @@
                    'jmx', 'gz', 'json', 'pdf', 'so', 'onnx')
 REPO_PATH_FROM_THIS_FILE = '../..'
 SKIP_PATHS = (
+    'build',
     'deploy/gke-marketplace-app/.gitignore',
     'deploy/gke-marketplace-app/server-deployer/chart/.helmignore',
     'deploy/gcp/.helmignore', 'deploy/aws/.helmignore',