fallback operators to CPU for onnx support (pytorch#15270)
Summary:
fallback operators to CPU for onnx support
Pull Request resolved: pytorch#15270

Differential Revision: D14099496

Pulled By: yinghai

fbshipit-source-id: 52b744aa5917700a802bdf19f7007cdcaa6e640a
gujinghui authored and facebook-github-bot committed Feb 22, 2019
1 parent 4778a40 commit 60de0b8
Showing 2 changed files with 71 additions and 8 deletions.
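Every operator touched by this change follows the same pattern: the operator is registered for the IDEEP device, but its implementation is the existing CPU operator wrapped in IDEEPFallbackOp, so ONNX-exported nets still run when no native IDEEP kernel exists. A minimal sketch of that registration pattern, where FooOp and its float/CPUContext template arguments are illustrative placeholders rather than part of this commit:

#include <caffe2/ideep/operators/operator_fallback_ideep.h>
#include <caffe2/ideep/utils/ideep_operator.h>
#include <caffe2/operators/foo_op.h>  // hypothetical CPU operator header

namespace caffe2 {

// Register "Foo" for the IDEEP device by delegating to the CPU kernel:
// inputs are brought to CPU, FooOp runs, and outputs are converted back.
REGISTER_IDEEP_OPERATOR(Foo, IDEEPFallbackOp<FooOp<float, CPUContext>>);

} // namespace caffe2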
71 changes: 63 additions & 8 deletions caffe2/ideep/operators/operator_fallback_ideep.cc
@@ -1,11 +1,14 @@
#include <caffe2/ideep/operators/operator_fallback_ideep.h>
#include <caffe2/ideep/utils/ideep_operator.h>

#include <caffe2/operators/abs_op.h>
#include <caffe2/operators/atan_op.h>
#include <caffe2/operators/accuracy_op.h>
#include <caffe2/operators/affine_channel_op.h>
#include <caffe2/operators/batch_matmul_op.h>
#include "caffe2/operators/bbox_transform_op.h"
#include "caffe2/operators/box_with_nms_limit_op.h"
#include <caffe2/operators/cast_op.h>
#include <caffe2/operators/clip_op.h>
#include <caffe2/operators/collect_and_distribute_fpn_rpn_proposals_op.h>
#include <caffe2/operators/cross_entropy_op.h>
@@ -17,6 +20,7 @@
#include <caffe2/operators/elementwise_div_op.h>
#include <caffe2/operators/elementwise_mul_op.h>
#include <caffe2/operators/elementwise_ops.h>
#include <caffe2/operators/elementwise_sub_op.h>
#include <caffe2/operators/expand_op.h>
#include <caffe2/operators/filler_op.h>
#include <caffe2/operators/flatten_op.h>
@@ -32,12 +36,15 @@
#include <caffe2/operators/roi_align_op.h>
#include <caffe2/operators/roi_align_rotated_op.h>
#include <caffe2/operators/scale_op.h>
#include <caffe2/operators/slice_op.h>
#include <caffe2/operators/sqrt_op.h>
#include <caffe2/operators/softmax_op.h>
#include <caffe2/operators/softmax_with_loss_op.h>
#include <caffe2/operators/stop_gradient.h>
#include <caffe2/operators/tanh_op.h>
#include <caffe2/operators/tensor_protos_db_input.h>
#include <caffe2/operators/transpose_op.h>
#include <caffe2/operators/utility_ops.h>
#include <caffe2/queue/queue_ops.h>
#include <caffe2/sgd/iter_op.h>
#include <caffe2/sgd/learning_rate_op.h>
@@ -54,6 +61,14 @@
// can add more non-IDEEP operators if needed
namespace caffe2 {

// Boolean operators
REGISTER_IDEEP_COMPARE_OPERATOR(EQ);
REGISTER_IDEEP_COMPARE_OPERATOR(GT);
REGISTER_IDEEP_COMPARE_OPERATOR(GE);
REGISTER_IDEEP_COMPARE_OPERATOR(LT);
REGISTER_IDEEP_COMPARE_OPERATOR(LE);
REGISTER_IDEEP_COMPARE_OPERATOR(NE);

REGISTER_IDEEP_OPERATOR(Softmax, IDEEPFallbackOp<SoftmaxOp<float, CPUContext>>);
REGISTER_IDEEP_OPERATOR(
LabelCrossEntropy,
@@ -64,6 +79,14 @@ REGISTER_IDEEP_OPERATOR(
REGISTER_IDEEP_OPERATOR(Flatten, IDEEPFallbackOp<FlattenOp<CPUContext>>);
REGISTER_IDEEP_OPERATOR(ResizeLike, IDEEPFallbackOp<ResizeLikeOp<CPUContext>>);
REGISTER_IDEEP_OPERATOR(Transpose, IDEEPFallbackOp<TransposeOp<CPUContext>>);
REGISTER_IDEEP_OPERATOR(Slice, IDEEPFallbackOp<SliceOp<CPUContext>>);
REGISTER_IDEEP_OPERATOR(Clip, IDEEPFallbackOp<ClipOp<float, CPUContext>>);
REGISTER_IDEEP_OPERATOR(
ScatterAssign,
IDEEPFallbackOp<ScatterAssignOp<CPUContext>>);
REGISTER_IDEEP_OPERATOR(
Cast,
IDEEPFallbackOp<CastOp<CPUContext>>);

// filter operators
REGISTER_IDEEP_OPERATOR(
@@ -81,6 +104,22 @@ REGISTER_IDEEP_OPERATOR(
REGISTER_IDEEP_OPERATOR(
GivenTensorFill,
IDEEPFallbackOp<GivenTensorFillOp<float, CPUContext>>);
// The tensor types used by the FillOp variants below are not supported by ideep
REGISTER_IDEEP_OPERATOR(
GivenTensorDoubleFill,
IDEEPFallbackOp<GivenTensorFillOp<double, CPUContext>, SkipIndices<0>>);
REGISTER_IDEEP_OPERATOR(
GivenTensorBoolFill,
IDEEPFallbackOp<GivenTensorFillOp<bool, CPUContext>, SkipIndices<0>>);
REGISTER_IDEEP_OPERATOR(
GivenTensorIntFill,
IDEEPFallbackOp<GivenTensorFillOp<int, CPUContext>, SkipIndices<0>>);
REGISTER_IDEEP_OPERATOR(
GivenTensorInt64Fill,
IDEEPFallbackOp<GivenTensorFillOp<int64_t, CPUContext>, SkipIndices<0>>);
REGISTER_IDEEP_OPERATOR(
GivenTensorStringFill,
IDEEPFallbackOp<GivenTensorFillOp<std::string, CPUContext>, SkipIndices<0>>);
REGISTER_IDEEP_OPERATOR(Load, IDEEPFallbackOp<LoadOp<CPUContext>>);
REGISTER_IDEEP_OPERATOR(Save, IDEEPFallbackOp<SaveOp<CPUContext>>);

@@ -142,10 +181,30 @@ REGISTER_IDEEP_OPERATOR(
REGISTER_IDEEP_OPERATOR(
LearningRate,
IDEEPFallbackOp<LearningRateOp<float, CPUContext>>);
REGISTER_IDEEP_OPERATOR(
Abs,
IDEEPFallbackOp<UnaryElementwiseOp<
TensorTypes<float>, CPUContext, AbsFunctor<CPUContext>>>);
REGISTER_IDEEP_OPERATOR(
Atan,
IDEEPFallbackOp<UnaryElementwiseOp<
TensorTypes<float>, CPUContext, AtanFunctor<CPUContext>>>);
REGISTER_IDEEP_OPERATOR(
Sqrt,
IDEEPFallbackOp<UnaryElementwiseOp<
TensorTypes<float>, CPUContext, SqrtFunctor<CPUContext>>>);
REGISTER_IDEEP_OPERATOR(
Div,
IDEEPFallbackOp<BinaryElementwiseOp<
NumericTypes, CPUContext, DivFunctor<CPUContext>>>);
REGISTER_IDEEP_OPERATOR(
Mul,
IDEEPFallbackOp<
BinaryElementwiseOp<NumericTypes, CPUContext, MulFunctor<CPUContext>>>);
REGISTER_IDEEP_OPERATOR(
Sub,
IDEEPFallbackOp<BinaryElementwiseOp<
NumericTypes, CPUContext, SubFunctor<CPUContext>>>);
REGISTER_IDEEP_OPERATOR(
Tanh,
IDEEPFallbackOp<UnaryElementwiseOp<
@@ -210,18 +269,14 @@ REGISTER_IDEEP_OPERATOR(
TensorTypes<std::int32_t, std::int64_t, float, double>,
CPUContext,
SumReducer<CPUContext>>>);

REGISTER_IDEEP_OPERATOR(
ReduceMean,
IDEEPFallbackOp<ReduceOp<
TensorTypes<float>, CPUContext, MeanReducer<CPUContext>>>);
REGISTER_IDEEP_OPERATOR(
BatchMatMul,
IDEEPFallbackOp<BatchMatMulOp<CPUContext>>);

REGISTER_IDEEP_OPERATOR(
Div,
IDEEPFallbackOp<
BinaryElementwiseOp<NumericTypes, CPUContext, DivFunctor<CPUContext>>>);

REGISTER_IDEEP_OPERATOR(Clip, IDEEPFallbackOp<ClipOp<float, CPUContext>>);

#ifdef CAFFE2_USE_GLOO
namespace gloo {
// gloo operators
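The heavy lifting happens inside IDEEPFallbackOp itself (declared in operator_fallback_ideep.h, not shown in this diff). Roughly speaking, it converts ideep-format inputs to plain CPU tensors, runs the wrapped CPU operator, and converts the outputs back, except for output indices listed in SkipIndices, which stay as CPU tensors — used above for the GivenTensor*Fill variants whose element types ideep cannot hold. The sketch below is a conceptual illustration only, with made-up stand-in types and names (CpuTensor, IdeepTensor, RunWithCpuFallback); it is not the actual Caffe2 implementation:

#include <cstddef>
#include <initializer_list>
#include <vector>

// Stand-in tensor types, for illustration only.
struct CpuTensor { std::vector<float> data; };
struct IdeepTensor { std::vector<float> data; };

CpuTensor ToCpu(const IdeepTensor& t) { return CpuTensor{t.data}; }
IdeepTensor ToIdeep(const CpuTensor& t) { return IdeepTensor{t.data}; }

// Conceptual fallback: convert inputs to CPU, run the wrapped CPU operator,
// then convert outputs back to ideep format unless their index is skipped
// (e.g. outputs whose element type ideep cannot represent).
template <class CpuOp>
std::vector<IdeepTensor> RunWithCpuFallback(
    CpuOp& cpu_op,
    const std::vector<IdeepTensor>& inputs,
    std::initializer_list<std::size_t> skip_outputs = {}) {
  std::vector<CpuTensor> cpu_inputs;
  for (const auto& in : inputs) {
    cpu_inputs.push_back(ToCpu(in));
  }

  // The wrapped CPU operator is assumed to expose a
  // std::vector<CpuTensor> Run(const std::vector<CpuTensor>&) method.
  std::vector<CpuTensor> cpu_outputs = cpu_op.Run(cpu_inputs);

  std::vector<IdeepTensor> ideep_outputs;
  for (std::size_t i = 0; i < cpu_outputs.size(); ++i) {
    bool skip = false;
    for (std::size_t s : skip_outputs) {
      skip = skip || (s == i);
    }
    if (!skip) {
      ideep_outputs.push_back(ToIdeep(cpu_outputs[i]));
    }
    // A skipped output simply stays as a plain CPU tensor.
  }
  return ideep_outputs;
}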
8 changes: 8 additions & 0 deletions caffe2/ideep/utils/ideep_operator.h
@@ -18,6 +18,14 @@ C10_DECLARE_REGISTRY(
C10_REGISTER_CLASS(IDEEPOperatorRegistry, name, __VA_ARGS__)
#define REGISTER_IDEEP_OPERATOR_STR(str_name, ...) \
C10_REGISTER_TYPED_CLASS(IDEEPOperatorRegistry, str_name, __VA_ARGS__)
#define REGISTER_IDEEP_COMPARE_OPERATOR(Op) \
REGISTER_IDEEP_OPERATOR( \
Op, \
IDEEPFallbackOp<BinaryElementwiseOp< \
TensorTypes<bool, int32_t, int64_t, float, double>, \
CPUContext, \
Op##Functor<CPUContext>, \
FixedType<bool>>>)

#define REGISTER_IDEEP_OPERATOR_WITH_ENGINE(name, engine, ...) \
C10_REGISTER_CLASS(IDEEPOperatorRegistry, name##_ENGINE_##engine, __VA_ARGS__)
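For reference (not part of the diff), instantiating the new macro as REGISTER_IDEEP_COMPARE_OPERATOR(EQ); in the first file expands to roughly the following registration, which routes the comparison to the CPU BinaryElementwiseOp and fixes the output type to bool:

REGISTER_IDEEP_OPERATOR(
    EQ,
    IDEEPFallbackOp<BinaryElementwiseOp<
        TensorTypes<bool, int32_t, int64_t, float, double>,
        CPUContext,
        EQFunctor<CPUContext>,
        FixedType<bool>>>);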
