# This yaml file contains operators that are unsupported with the openvino backend
# and will fall back to portable kernels.

- op: _cdist_forward.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::_cdist_forward_out

- op: _pdist_forward.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::_pdist_forward_out

- op: alias_copy.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::alias_copy_out

- op: any.all_out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::any_all_out

- op: any.dims_out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::any_dims_out

- op: atan.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::atan_out

- op: atan2.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::atan2_out

- op: bitwise_or.Scalar_out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::bitwise_or_Scalar_out

- op: bitwise_xor.Scalar_out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::bitwise_xor_Scalar_out

- op: clamp.Tensor_out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::clamp_tensor_out

- op: convolution_backward.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::convolution_backward_out

- op: detach_copy.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::detach_copy_out

- op: diagonal_copy.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::diagonal_copy_out

- op: expm1.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::expm1_out

- op: floor_divide.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::floor_divide_out

- op: index_put.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::index_put_out

- op: logical_and.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::logical_and_out

- op: logical_or.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::logical_or_out

- op: logical_xor.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::logical_xor_out

- op: logit.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::logit_out

- op: masked_scatter.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::masked_scatter_out

- op: masked_select.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::masked_select_out

- op: narrow_copy.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::narrow_copy_out

- op: nonzero.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::nonzero_out

- op: pixel_shuffle.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::pixel_shuffle_out

- op: pixel_unshuffle.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::pixel_unshuffle_out

- op: prod.int_out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::prod_int_out

- op: prod.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::prod_out

- op: remainder.Tensor_out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::remainder_Tensor_out

- op: remainder.Scalar_out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::remainder_Scalar_out

- op: repeat_interleave.Tensor_out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::repeat_interleave_Tensor_out

- op: reflection_pad1d.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::reflection_pad1d_out

- op: reflection_pad3d.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::reflection_pad3d_out

- op: replication_pad1d.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::replication_pad1d_out

- op: replication_pad2d.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::replication_pad2d_out

- op: replication_pad3d.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::replication_pad3d_out

- op: round.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::round_out

- op: scatter_add.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::scatter_add_out

- op: split_copy.Tensor_out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::split_copy_Tensor_out

- op: squeeze_copy.dim_out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::squeeze_copy_dim_out

- op: sub.Scalar_out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::sub_scalar_out

- op: t_copy.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::t_copy_out

- op: transpose_copy.int_out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::transpose_copy_int_out

- op: trunc.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::trunc_out

- op: unbind_copy.int_out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::unbind_copy_int_out

- op: upsample_bilinear2d.vec_out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::upsample_bilinear2d_vec_out

- func: dim_order_ops::_empty_dim_order.out(int[] size, *, int[]? dim_order=None, Tensor(a!) out) -> Tensor(a!)
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::_empty_dim_order_out

- func: dim_order_ops::_to_dim_order_copy.out(Tensor self, *, bool non_blocking=False, int[]? dim_order=None, Tensor(a!) out) -> Tensor(a!)
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::_to_dim_order_copy_out