Cherry-pick IR changes #9430

Merged 20 commits on Nov 18, 2022
28 changes: 28 additions & 0 deletions cmake/caches/cn/fast/mlir-cuda-80.cmake
@@ -0,0 +1,28 @@
set(BUILD_SHARED_LIBS YES CACHE BOOL "")
# uncomment only if you know what you are doing
# set(CMAKE_LINK_DEPENDS_NO_SHARED YES CACHE BOOL "")
set(BUILD_CUDA YES CACHE BOOL "")
set(BUILD_GIT_VERSION NO CACHE BOOL "")
set(TREAT_WARNINGS_AS_ERRORS YES CACHE BOOL "")
set(BUILD_HWLOC NO CACHE BOOL "")
set(BUILD_TESTING OFF CACHE BOOL "")
set(WITH_MLIR YES CACHE BOOL "")
set(WITH_MLIR_CUDA_CODEGEN YES CACHE BOOL "")
set(THIRD_PARTY_MIRROR aliyun CACHE STRING "")
set(PIP_INDEX_MIRROR "https://pypi.tuna.tsinghua.edu.cn/simple" CACHE STRING "")
set(CMAKE_BUILD_TYPE RelWithDebInfo CACHE STRING "")
set(CMAKE_GENERATOR Ninja CACHE STRING "")
set(CMAKE_CUDA_ARCHITECTURES "80" CACHE STRING "")
set(CUDA_TOOLKIT_ROOT_DIR /usr/local/cuda CACHE STRING "")
set(CUDNN_ROOT_DIR /usr/local/cudnn CACHE STRING "")
set(CMAKE_C_COMPILER_LAUNCHER ccache CACHE STRING "")
set(CMAKE_CXX_COMPILER_LAUNCHER ccache CACHE STRING "")
set(CMAKE_CUDA_COMPILER_LAUNCHER ccache CACHE STRING "")
set(CMAKE_INTERPROCEDURAL_OPTIMIZATION OFF CACHE BOOL "")
set(CMAKE_EXE_LINKER_FLAGS_INIT "-fuse-ld=lld" CACHE STRING "")
set(CMAKE_MODULE_LINKER_FLAGS_INIT "-fuse-ld=lld" CACHE STRING "")
set(CMAKE_SHARED_LINKER_FLAGS_INIT "-fuse-ld=lld" CACHE STRING "")
set(CPU_THREADING_RUNTIME SEQ CACHE STRING
"when using lld with TBB enabled, there will be linkage error")
set(BUILD_HWLOC OFF CACHE BOOL "")
set(WITH_ONEDNN OFF CACHE BOOL "")
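
Note: this is a CMake initial-cache script, so it would typically be consumed with something like "cmake -C cmake/caches/cn/fast/mlir-cuda-80.cmake -S . -B build" from the repository root and then built with ninja (an illustrative invocation, not part of this PR; the cache itself already selects the Ninja generator and CUDA architecture 80).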
41 changes: 41 additions & 0 deletions oneflow/ir/include/OneFlow/OneFlowInterfaces.td
@@ -116,5 +116,46 @@ def NCHWCompatibleInterface : OpInterface<"NCHWCompatible"> {
let cppNamespace = "::mlir::oneflow";
}

def BiasAddCompatibleInterface : OpInterface<"BiasAddCompatible"> {
let description = [{
Interface of ops used as bias add
}];

let methods = [
InterfaceMethod<"",
"bool", "isLastDim", (ins)
>,
InterfaceMethod<"",
"mlir::Value", "b", (ins)
>,
InterfaceMethod<"",
"mlir::Value", "out", (ins)
>,
];
let cppNamespace = "::mlir::oneflow";
}

def MatMulCompatibleInterface : OpInterface<"MatMulCompatible"> {
let description = [{
Interface of ops used as matmul
}];

let methods = [
InterfaceMethod<"is this a transpose_a=false, transpose_b=true matmul",
"bool", "isLinear", (ins)
>,
InterfaceMethod<"",
"mlir::Value", "a", (ins)
>,
InterfaceMethod<"",
"mlir::Value", "b", (ins)
>,
InterfaceMethod<"",
"mlir::Value", "out", (ins)
>,
];
let cppNamespace = "::mlir::oneflow";
}


#endif // ONEFLOW_IR_INCLUDE_ONEFLOW_ONEFLOWINTERFACES_H_
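
For context, a rough sketch of how a rewrite could consume the two interfaces declared above, using the TableGen-generated C++ classes mlir::oneflow::BiasAddCompatible and mlir::oneflow::MatMulCompatible. The include path and the operand-walking logic are assumptions for illustration, not code from this PR.

#include "mlir/IR/Operation.h"
#include "OneFlow/OneFlowOps.h"  // assumed header exposing the generated interface classes

using namespace mlir;

// Find a transpose_a=false, transpose_b=true matmul feeding a last-dim bias add,
// i.e. the matmul + bias pattern that a grouping pass could batch together.
static oneflow::MatMulCompatible getLinearProducer(oneflow::BiasAddCompatible biasAdd) {
  if (!biasAdd.isLastDim()) return {};
  for (Value operand : biasAdd.getOperation()->getOperands()) {
    if (operand == biasAdd.b()) continue;  // skip the bias operand itself
    if (auto matmul = operand.getDefiningOp<oneflow::MatMulCompatible>())
      if (matmul.isLinear()) return matmul;
  }
  return {};
}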
36 changes: 36 additions & 0 deletions oneflow/ir/include/OneFlow/OneFlowOps.td
@@ -315,4 +315,40 @@ def ConvertToSignlessForTosaPass : Pass<"convert-to-signless-for-tosa", "ModuleO
let dependentDialects = ["func::FuncDialect"];
}

def CSEWithAttributesIgnored : Pass<"cse-with-attributes-ignored", "ModuleOp"> {
let summary = "ignore oneflow attributes to have cse work";
let description = [{
cse and ignore oneflow attributes like op name, symbol id, etc.
}];
let constructor = "mlir::oneflow::createCSEWithAttributesIgnored()";
let dependentDialects = [];
}

def CSEPutAttributes : Pass<"cse-put-attributes", "ModuleOp"> {
let summary = "cse and ignore oneflow attributes";
let description = [{
put back oneflow attributes like op name, symbol id, etc.
}];
let constructor = "mlir::oneflow::createCSEPutAttributes()";
let dependentDialects = [];
}

def GroupMatMul : Pass<"group-matmul", "ModuleOp"> {
let summary = "group matmul together";
let description = [{
group matmul ops together and use cudnn batched matmul
}];
let constructor = "mlir::oneflow::createGroupMatMul()";
let dependentDialects = [];
}

def FuseForwardOps : Pass<"fuse-forward-only-ops", "ModuleOp"> {
let summary = "fuse forward ops";
let description = [{
fuse forward ops. Usually they are actions after an op.
}];
let constructor = "mlir::oneflow::createFuseForwardOps()";
let dependentDialects = [];
}

#endif // ONEFLOW_PASSES
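
As a rough sketch, the four new passes might be combined in a pipeline like the one below. The factory functions are the ones declared in this PR; the ordering shown, and whether the upstream mlir::createCSEPass() runs between the two attribute passes or inside them, is an assumption for illustration.

#include "OneFlow/Passes.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Transforms/Passes.h"

void buildExamplePipeline(mlir::PassManager &pm) {
  // Hide OneFlow-only attributes (op name, scope symbol id) so structurally
  // identical ops can be deduplicated...
  pm.addPass(mlir::oneflow::createCSEWithAttributesIgnored());
  pm.addPass(mlir::createCSEPass());
  // ...then restore those attributes on the surviving ops.
  pm.addPass(mlir::oneflow::createCSEPutAttributes());
  // Batch compatible matmul (+ bias add) chains so they can run as grouped matmul.
  pm.addPass(mlir::oneflow::createGroupMatMul());
  // Fuse simple forward-only ops that follow another op.
  pm.addPass(mlir::oneflow::createFuseForwardOps());
}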
10 changes: 5 additions & 5 deletions oneflow/ir/include/OneFlow/OneFlowUserOps.td
@@ -121,7 +121,7 @@ class OneFlow_NormalizationBaseOp<string mnemonic, list<Trait> traits = []> : On

#ifdef GET_ONEFLOW_BINARY_OP_DEFINITIONS

def OneFlow_BiasAddOp : OneFlow_BaseOp<"bias_add", [NoSideEffect, DeclareOpInterfaceMethods<UserOpCompatibleInterface>, DeclareOpInterfaceMethods<NCHWCompatibleInterface>]> {
def OneFlow_BiasAddOp : OneFlow_BaseOp<"bias_add", [NoSideEffect, DeclareOpInterfaceMethods<UserOpCompatibleInterface>, DeclareOpInterfaceMethods<NCHWCompatibleInterface>, DeclareOpInterfaceMethods<BiasAddCompatibleInterface>]> {
let input = (ins
OneFlow_Tensor:$a,
OneFlow_Tensor:$b
@@ -662,12 +662,12 @@ def OneFlow_XlogyOp : OneFlow_BaseOp<"xlogy", [NoSideEffect, DeclareOpInterfaceM
#endif // GET_ONEFLOW_BINARY_OP_DEFINITIONS

// Group: BROADCAST
// broadcast_add, broadcast_div, broadcast_div_grad, broadcast_equal, broadcast_floor_mod, broadcast_fmod, broadcast_greater, broadcast_greater_equal, broadcast_less, broadcast_less_equal, broadcast_like, broadcast_logical_and, broadcast_logical_or, broadcast_logical_xor, broadcast_maximum, broadcast_minimum, broadcast_mul, broadcast_not_equal, broadcast_pow, broadcast_sub, broadcast_isclose_eq_nan, broadcast_isclose_neq_nan
// Total: 22

#ifdef GET_ONEFLOW_BROADCAST_OP_DEFINITIONS

def OneFlow_BroadcastAddOp : OneFlow_BaseOp<"broadcast_add", [NoSideEffect, DeclareOpInterfaceMethods<UserOpCompatibleInterface>, DeclareOpInterfaceMethods<NCHWCompatibleInterface>]> {
def OneFlow_BroadcastAddOp : OneFlow_BaseOp<"broadcast_add", [NoSideEffect, DeclareOpInterfaceMethods<UserOpCompatibleInterface>, DeclareOpInterfaceMethods<NCHWCompatibleInterface>, DeclareOpInterfaceMethods<BiasAddCompatibleInterface>]> {
let input = (ins
OneFlow_Tensor:$x,
OneFlow_Tensor:$y
@@ -4648,7 +4648,7 @@ def OneFlow_BatchMatmulOp : OneFlow_BaseOp<"batch_matmul", [NoSideEffect, Declar
let has_compute_complexity_fn = 1;
}

def OneFlow_BroadcastMatmulOp : OneFlow_BaseOp<"broadcast_matmul", [NoSideEffect, DeclareOpInterfaceMethods<UserOpCompatibleInterface>]> {
def OneFlow_BroadcastMatmulOp : OneFlow_BaseOp<"broadcast_matmul", [NoSideEffect, DeclareOpInterfaceMethods<UserOpCompatibleInterface>, DeclareOpInterfaceMethods<MatMulCompatibleInterface>]> {
let input = (ins
OneFlow_Tensor:$a,
OneFlow_Tensor:$b,
@@ -4751,7 +4751,7 @@ def OneFlow_ErfcGradOp : OneFlow_BaseOp<"erfc_grad", [NoSideEffect, DeclareOpInt
let has_data_type_infer_fn = 1;
}

def OneFlow_MatmulOp : OneFlow_BaseOp<"matmul", [NoSideEffect, DeclareOpInterfaceMethods<UserOpCompatibleInterface>]> {
def OneFlow_MatmulOp : OneFlow_BaseOp<"matmul", [NoSideEffect, DeclareOpInterfaceMethods<UserOpCompatibleInterface>, DeclareOpInterfaceMethods<MatMulCompatibleInterface>]> {
let input = (ins
OneFlow_Tensor:$a,
OneFlow_Tensor:$b,
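
Declaring MatMulCompatibleInterface on matmul and broadcast_matmul means the dialect needs C++ implementations of isLinear/a/b/out for those ops. A plausible sketch for MatmulOp follows; the accessor names (getTransposeA, getA, getOut, ...) depend on the dialect's generated accessor prefix and are assumptions, not code from this PR.

// Possible out-of-line interface implementation for OneFlow_MatmulOp.
bool MatmulOp::isLinear() {
  // "Linear" per the interface doc: transpose_a=false and transpose_b=true.
  return !getTransposeA() && getTransposeB();
}
mlir::Value MatmulOp::a() { return getA(); }
mlir::Value MatmulOp::b() { return getB(); }
mlir::Value MatmulOp::out() { return getOut(); }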
1 change: 1 addition & 0 deletions oneflow/ir/include/OneFlow/Passes.h
@@ -29,6 +29,7 @@ limitations under the License.
#include "OneFlow/Transform/ConvertInferenceOp.h"
#include "OneFlow/Transform/OutlineAndFuse.h"
#include "OneFlow/Transform/AutoNhwc.h"
#include "OneFlow/Transform/CSEWithAttributesIgnored.h"

#ifdef WITH_MLIR_CUDA_CODEGEN
#include "OneFlow/Conversion/PTXToCubin.h"
39 changes: 39 additions & 0 deletions oneflow/ir/include/OneFlow/Transform/CSEWithAttributesIgnored.h
@@ -0,0 +1,39 @@
/*
Copyright 2020 The OneFlow Authors. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#ifndef ONEFLOW_IR_INCLUDE_ONEFLOW_TRANSFORM_CSEWITHATTRIBUTESIGNORED_H_
#define ONEFLOW_IR_INCLUDE_ONEFLOW_TRANSFORM_CSEWITHATTRIBUTESIGNORED_H_

#include "mlir/Pass/Pass.h"

namespace mlir {

namespace oneflow {

struct CSEState {
llvm::DenseMap<Operation*, IntegerAttr> scopeSymbolIDs;
llvm::DenseMap<Operation*, StringAttr> opNames;
};
std::unique_ptr<mlir::Pass> createCSEWithAttributesIgnored();
std::unique_ptr<mlir::Pass> createCSEPutAttributes();
std::pair<std::unique_ptr<Pass>, std::unique_ptr<Pass>> createCSEPasses(
std::shared_ptr<CSEState> state);
void registerCSEPasses(std::shared_ptr<CSEState> state);

} // namespace oneflow

} // namespace mlir

#endif // ONEFLOW_IR_INCLUDE_ONEFLOW_TRANSFORM_CSEWITHATTRIBUTESIGNORED_H_
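
Based only on the declarations in this header, the paired factory presumably lets both passes share one CSEState so attributes stripped before CSE can be restored afterwards. A usage sketch under that assumption (how the state is populated is internal to the passes):

#include <memory>
#include <utility>
#include "OneFlow/Transform/CSEWithAttributesIgnored.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Transforms/Passes.h"

void addCSERoundTrip(mlir::PassManager &pm) {
  auto state = std::make_shared<mlir::oneflow::CSEState>();
  auto passes = mlir::oneflow::createCSEPasses(state);
  pm.addPass(std::move(passes.first));   // record + strip op names / scope symbol ids
  pm.addPass(mlir::createCSEPass());     // upstream CSE now sees attribute-free ops
  pm.addPass(std::move(passes.second));  // write the recorded attributes back
}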
2 changes: 2 additions & 0 deletions oneflow/ir/include/OneFlow/Transform/OutlineAndFuse.h
@@ -25,6 +25,8 @@ std::unique_ptr<mlir::Pass> createConvertOFKLCalleeToLLVMPass();
std::unique_ptr<mlir::Pass> createKernelLaunchFunctionPass();
std::unique_ptr<mlir::Pass> createOutlineJitFunctionPass();
std::unique_ptr<mlir::Pass> createFuseIntoExistingOpPass();
std::unique_ptr<mlir::Pass> createGroupMatMul();
std::unique_ptr<mlir::Pass> createFuseForwardOps();

} // namespace oneflow

3 changes: 3 additions & 0 deletions oneflow/ir/lib/OneFlow/CMakeLists.txt
@@ -34,6 +34,9 @@ oneflow_add_mlir_dialect_library(
Transform/OutlineAndFuse.cpp
Transform/AutoNhwc.cpp
Transform/ConvertInferenceOp.cpp
Transform/CSEWithAttributesIgnored.cpp
Transform/GroupMatMulOps.cpp
Transform/AutoNHWCOps.cpp
TransposeHelpers.cpp
Passes.cpp
${GROUPED_OP_CPP_FILES}