Skip to content

Added fallback with portable kernels #6

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

Merged
merged 6 commits into from
Jan 28, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
69 changes: 41 additions & 28 deletions backends/openvino/CMakeLists.txt
Original file line number Diff line number Diff line change
@@ -1,58 +1,71 @@
# Require C++17 for all targets configured in this directory.
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED ON)

# Emit compile_commands.json for tooling (clangd, clang-tidy, ...).
set(CMAKE_EXPORT_COMPILE_COMMANDS ON)

# Common include directories (repository root, three levels up).
set(COMMON_INCLUDE_DIRS ${CMAKE_CURRENT_SOURCE_DIR}/../../..)

# Include common directories BEFORE others so in-tree headers take
# precedence over any installed copies.
include_directories(BEFORE ${COMMON_INCLUDE_DIRS})

# Source root directory for executorch; default it relative to this
# file when the caller has not already provided one.
if(NOT EXECUTORCH_ROOT)
  set(EXECUTORCH_ROOT ${CMAKE_CURRENT_SOURCE_DIR}/../..)
endif()

# Utility cmake helpers shipped with the executorch repository.
include(${EXECUTORCH_ROOT}/build/Utils.cmake)

# Re-point the common include directory at the executorch parent so
# `#include <executorch/...>` paths resolve for the backend sources.
set(COMMON_INCLUDE_DIRS ${EXECUTORCH_ROOT}/..)

# Locate the OpenVINO installation from the environment (read at
# configure time only) and fail fast with a clear message if missing.
set(OPENVINO_DIR "$ENV{INTEL_OPENVINO_DIR}")
if(NOT OPENVINO_DIR)
  message(FATAL_ERROR "INTEL_OPENVINO_DIR environment variable is not set.")
endif()

# OpenVINO public headers: legacy inference-engine layout plus the
# current runtime layout.
set(OPENVINO_INCLUDE_DIRS
    ${OPENVINO_DIR}/deployment_tools/inference_engine/include
    ${OPENVINO_DIR}/runtime/include
)

# OpenVINO runtime library location and the library we link against.
set(OPENVINO_LIB_PATH ${OPENVINO_DIR}/runtime/lib/intel64)
set(OPENVINO_LIB ${OPENVINO_LIB_PATH}/libopenvino.so)

# The OpenVINO backend is built as a shared library.
add_library(openvino_backend SHARED)

# The backend requires exceptions and RTTI even if the surrounding
# build disables them.
target_compile_options(openvino_backend PRIVATE -frtti -fexceptions)

# Headers for both executorch and OpenVINO; PUBLIC so consumers of the
# backend target inherit them.
target_include_directories(
  openvino_backend
  PUBLIC ${COMMON_INCLUDE_DIRS} ${OPENVINO_INCLUDE_DIRS}
)

# Link the OpenVINO runtime and the executorch core implementation.
target_link_libraries(openvino_backend PRIVATE ${OPENVINO_LIB} executorch_core)

# Backend implementation sources.
target_sources(
  openvino_backend
  PRIVATE ${CMAKE_CURRENT_LIST_DIR}/runtime/OpenvinoBackend.cpp
)

# Embed an rpath so the backend locates libopenvino.so at runtime
# without requiring LD_LIBRARY_PATH.
target_link_options(openvino_backend PRIVATE -Wl,-rpath=${OPENVINO_LIB_PATH})

# Install the backend library alongside the other executorch libraries.
install(TARGETS openvino_backend DESTINATION lib)



242 changes: 242 additions & 0 deletions backends/openvino/openvino_functions.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,242 @@
# Operators that are unsupported by the OpenVINO backend; execution
# falls back to the portable kernels listed here.
# Format follows the executorch kernel-registration functions.yaml
# convention: each `op`/`func` entry maps to a portable kernel.

- op: _cdist_forward.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::_cdist_forward_out

- op: _pdist_forward.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::_pdist_forward_out

- op: alias_copy.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::alias_copy_out

- op: any.all_out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::any_all_out

- op: any.dims_out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::any_dims_out

- op: atan.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::atan_out

- op: atan2.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::atan2_out

- op: bitwise_or.Scalar_out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::bitwise_or_Scalar_out

- op: bitwise_xor.Scalar_out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::bitwise_xor_Scalar_out

- op: clamp.Tensor_out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::clamp_tensor_out

- op: convolution_backward.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::convolution_backward_out

- op: detach_copy.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::detach_copy_out

- op: diagonal_copy.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::diagonal_copy_out

- op: expm1.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::expm1_out

- op: floor_divide.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::floor_divide_out

- op: index_put.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::index_put_out

- op: logical_and.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::logical_and_out

- op: logical_or.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::logical_or_out

- op: logical_xor.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::logical_xor_out

- op: logit.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::logit_out

- op: masked_scatter.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::masked_scatter_out

- op: masked_select.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::masked_select_out

- op: narrow_copy.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::narrow_copy_out

- op: nonzero.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::nonzero_out

- op: pixel_shuffle.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::pixel_shuffle_out

- op: pixel_unshuffle.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::pixel_unshuffle_out

- op: prod.int_out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::prod_int_out

- op: prod.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::prod_out

- op: remainder.Tensor_out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::remainder_Tensor_out

- op: remainder.Scalar_out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::remainder_Scalar_out

- op: repeat_interleave.Tensor_out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::repeat_interleave_Tensor_out

- op: reflection_pad1d.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::reflection_pad1d_out

- op: reflection_pad3d.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::reflection_pad3d_out

- op: replication_pad1d.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::replication_pad1d_out

- op: replication_pad2d.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::replication_pad2d_out

- op: replication_pad3d.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::replication_pad3d_out

- op: round.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::round_out

- op: scatter_add.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::scatter_add_out

- op: split_copy.Tensor_out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::split_copy_Tensor_out

- op: squeeze_copy.dim_out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::squeeze_copy_dim_out

- op: sub.Scalar_out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::sub_scalar_out

- op: t_copy.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::t_copy_out

- op: transpose_copy.int_out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::transpose_copy_int_out

- op: trunc.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::trunc_out

- op: unbind_copy.int_out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::unbind_copy_int_out

- op: upsample_bilinear2d.vec_out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::upsample_bilinear2d_vec_out

# Dim-order ops use explicit `func` schemas rather than `op` names.
- func: dim_order_ops::_empty_dim_order.out(int[] size, *, int[]? dim_order=None, Tensor(a!) out) -> Tensor(a!)
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::_empty_dim_order_out

- func: dim_order_ops::_to_dim_order_copy.out(Tensor self, *, bool non_blocking=False, int[]? dim_order=None, Tensor(a!) out) -> Tensor(a!)
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::_to_dim_order_copy_out
Loading
Loading