
Commit 3c6d123

Merge pull request #6 from ynimmaga/portable_kernels
Added fallback with portable kernels
2 parents: 7c43014 + 5c55a56

File tree: 6 files changed, +432 −187 lines


backends/openvino/CMakeLists.txt

Lines changed: 41 additions & 28 deletions
@@ -1,58 +1,71 @@
+# Set C++ standard
 set(CMAKE_CXX_STANDARD 17)
 set(CMAKE_CXX_STANDARD_REQUIRED ON)
 
+# Ensure compile_commands are generated
 set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
 
-set(_common_include_directories ${CMAKE_CURRENT_SOURCE_DIR}/../../..)
+# Define common include directories
+set(COMMON_INCLUDE_DIRS ${CMAKE_CURRENT_SOURCE_DIR}/../../..)
 
-include_directories(BEFORE ${_common_include_directories})
+# Include common directories before others to ensure proper order
+include_directories(BEFORE ${COMMON_INCLUDE_DIRS})
 
-# Source root directory for executorch.
+# Set up EXECUTORCH_ROOT if not already set
 if(NOT EXECUTORCH_ROOT)
   set(EXECUTORCH_ROOT ${CMAKE_CURRENT_SOURCE_DIR}/../..)
 endif()
 
+# Include utility cmake script from the executorch repository
 include(${EXECUTORCH_ROOT}/build/Utils.cmake)
 
-set(_common_include_directories ${EXECUTORCH_ROOT}/..)
+# Update common include directory for ExecuteTorch
+set(COMMON_INCLUDE_DIRS ${EXECUTORCH_ROOT}/..)
 
-# Set openvino directory from environment
+# Set OpenVINO directory and include directories from environment variable
 set(OPENVINO_DIR "$ENV{INTEL_OPENVINO_DIR}")
-set(OPENVINO_INCLUDE_DIRS ${OPENVINO_DIR}/deployment_tools/inference_engine/include ${OPENVINO_DIR}/runtime/include)
+if(NOT OPENVINO_DIR)
+  message(FATAL_ERROR "INTEL_OPENVINO_DIR environment variable is not set.")
+endif()
+
+set(OPENVINO_INCLUDE_DIRS
+  ${OPENVINO_DIR}/deployment_tools/inference_engine/include
+  ${OPENVINO_DIR}/runtime/include
+)
 
-# Add the OpenVINO backend library
+# Define OpenVINO library path
+set(OPENVINO_LIB_PATH ${OPENVINO_DIR}/runtime/lib/intel64)
+
+# Define OpenVINO libraries
+set(OPENVINO_LIB ${OPENVINO_LIB_PATH}/libopenvino.so)
+
+# Add the OpenVINO backend library as a shared library
 add_library(openvino_backend SHARED)
+
+# Enable exceptions and RTTI for OpenVINO backend
 target_compile_options(openvino_backend PRIVATE "-frtti" "-fexceptions")
 
 # Include directories for ExecuteTorch and OpenVINO
 target_include_directories(
-  openvino_backend PUBLIC ${_common_include_directories}
+  openvino_backend PUBLIC
+  ${COMMON_INCLUDE_DIRS}
+  ${OPENVINO_INCLUDE_DIRS}
 )
 
-target_include_directories(
-  openvino_backend PUBLIC ${OPENVINO_INCLUDE_DIRS}
+# Link OpenVINO libraries and executorch core to the backend
+target_link_libraries(openvino_backend PRIVATE
+  ${OPENVINO_LIB}
+  executorch_core
 )
 
-set(OPENVINO_LIB_PATH ${OPENVINO_DIR}/runtime/lib/intel64)
-set(OPENVINO_LIBS
-  ${OPENVINO_LIB_PATH}/libopenvino.so.2025.0.0
-  ${OPENVINO_LIB_PATH}/libopenvino_ir_frontend.so.2025.0.0
-  ${OPENVINO_LIB_PATH}/libopenvino_c.so
-  ${OPENVINO_LIB_PATH}/libopenvino_intel_cpu_plugin.so
-  ${OPENVINO_LIB_PATH}/libopenvino_intel_gpu_plugin.so
-  ${OPENVINO_LIB_PATH}/libopenvino_auto_plugin.so
+# Add source files to the OpenVINO backend library
+target_sources(openvino_backend PRIVATE
+  ${CMAKE_CURRENT_LIST_DIR}/runtime/OpenvinoBackend.cpp
 )
 
-# Link the OpenVINO library to the backend
-target_link_libraries(openvino_backend PRIVATE ${OPENVINO_LIBS} executorch_core)
-
-target_sources(
-  openvino_backend
-  PRIVATE ${CMAKE_CURRENT_LIST_DIR}/runtime/OpenvinoBackend.cpp
-)
+# Set additional link options for shared library
+target_link_options(openvino_backend PRIVATE -Wl,-rpath=${OPENVINO_LIB_PATH})
 
-target_link_options_shared_lib(openvino_backend)
+# Install the OpenVINO backend library to the lib directory
 install(TARGETS openvino_backend DESTINATION lib)
-
-
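The diff replaces the hard-coded, version-suffixed OpenVINO libraries (libopenvino.so.2025.0.0, the individual frontend and device-plugin .so files) with a single link against libopenvino.so plus an rpath pointing at the OpenVINO runtime directory, so the backend no longer pins a specific OpenVINO release and device plugins are discovered by the runtime itself. For comparison, a minimal sketch of another way to reach the same goal, assuming an OpenVINO install whose CMake package config is discoverable (for example via CMAKE_PREFIX_PATH or after sourcing setupvars.sh); this is not what the commit does, only an illustration of the design choice:

# Hypothetical alternative: let OpenVINO's package config supply include paths,
# the imported runtime target, and version handling instead of hand-built paths.
find_package(OpenVINO REQUIRED)

add_library(openvino_backend SHARED
  ${CMAKE_CURRENT_LIST_DIR}/runtime/OpenvinoBackend.cpp
)
target_compile_options(openvino_backend PRIVATE -frtti -fexceptions)

# openvino::runtime is the imported target exported by OpenVINOConfig.cmake.
target_link_libraries(openvino_backend PRIVATE openvino::runtime executorch_core)

install(TARGETS openvino_backend DESTINATION lib)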
Lines changed: 242 additions & 0 deletions
@@ -0,0 +1,242 @@
# This yaml file contains operators that are unsupported with openvino backend and
# will use portable kernels for fall back

- op: _cdist_forward.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::_cdist_forward_out

- op: _pdist_forward.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::_pdist_forward_out

- op: alias_copy.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::alias_copy_out

- op: any.all_out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::any_all_out

- op: any.dims_out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::any_dims_out

- op: atan.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::atan_out

- op: atan2.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::atan2_out

- op: bitwise_or.Scalar_out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::bitwise_or_Scalar_out

- op: bitwise_xor.Scalar_out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::bitwise_xor_Scalar_out

- op: clamp.Tensor_out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::clamp_tensor_out

- op: convolution_backward.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::convolution_backward_out

- op: detach_copy.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::detach_copy_out

- op: diagonal_copy.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::diagonal_copy_out

- op: expm1.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::expm1_out

- op: floor_divide.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::floor_divide_out

- op: index_put.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::index_put_out

- op: logical_and.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::logical_and_out

- op: logical_or.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::logical_or_out

- op: logical_xor.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::logical_xor_out

- op: logit.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::logit_out

- op: masked_scatter.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::masked_scatter_out

- op: masked_select.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::masked_select_out

- op: narrow_copy.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::narrow_copy_out

- op: nonzero.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::nonzero_out

- op: pixel_shuffle.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::pixel_shuffle_out

- op: pixel_unshuffle.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::pixel_unshuffle_out

- op: prod.int_out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::prod_int_out

- op: prod.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::prod_out

- op: remainder.Tensor_out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::remainder_Tensor_out

- op: remainder.Scalar_out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::remainder_Scalar_out

- op: repeat_interleave.Tensor_out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::repeat_interleave_Tensor_out

- op: reflection_pad1d.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::reflection_pad1d_out

- op: reflection_pad3d.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::reflection_pad3d_out

- op: replication_pad1d.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::replication_pad1d_out

- op: replication_pad2d.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::replication_pad2d_out

- op: replication_pad3d.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::replication_pad3d_out

- op: round.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::round_out

- op: scatter_add.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::scatter_add_out

- op: split_copy.Tensor_out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::split_copy_Tensor_out

- op: squeeze_copy.dim_out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::squeeze_copy_dim_out

- op: sub.Scalar_out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::sub_scalar_out

- op: t_copy.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::t_copy_out

- op: transpose_copy.int_out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::transpose_copy_int_out

- op: trunc.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::trunc_out

- op: unbind_copy.int_out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::unbind_copy_int_out

- op: upsample_bilinear2d.vec_out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::upsample_bilinear2d_vec_out

- func: dim_order_ops::_empty_dim_order.out(int[] size, *, int[]? dim_order=None, Tensor(a!) out) -> Tensor(a!)
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::_empty_dim_order_out

- func: dim_order_ops::_to_dim_order_copy.out(Tensor self, *, bool non_blocking=False, int[]? dim_order=None, Tensor(a!) out) -> Tensor(a!)
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::_to_dim_order_copy_out
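A yaml in this format is what executorch's kernel codegen consumes to build an operator-registration library, so the ops listed above get registered with their portable kernel implementations and act as the fallback path for parts of a graph the OpenVINO backend does not delegate. A rough sketch of that wiring, assuming the Codegen.cmake helpers shipped with executorch (gen_selected_ops, generate_bindings_for_kernels, gen_operators_lib); the yaml path and library name are placeholders, and the keyword arguments follow the executorch selective-build examples, so they may differ from what this PR wires up elsewhere in the tree:

# Sketch only: feed the fallback yaml into executorch's kernel codegen.
# The yaml filename and library name below are placeholders, not from this commit.
include(${EXECUTORCH_ROOT}/build/Codegen.cmake)

# Select exactly the ops listed in the fallback yaml.
gen_selected_ops(
  LIB_NAME "openvino_portable_ops_lib"
  OPS_SCHEMA_YAML "${CMAKE_CURRENT_LIST_DIR}/<fallback_kernels>.yaml"
  ROOT_OPS ""
  INCLUDE_ALL_OPS ""
)

# Generate registration code binding those ops to the portable kernel implementations.
generate_bindings_for_kernels(
  LIB_NAME "openvino_portable_ops_lib"
  FUNCTIONS_YAML ${EXECUTORCH_ROOT}/kernels/portable/functions.yaml
)

# Produce the operator library, linked against the portable kernels.
gen_operators_lib(
  LIB_NAME "openvino_portable_ops_lib"
  KERNEL_LIBS portable_kernels
  DEPS executorch
)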
