Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Ferdinand.cherry picks from fused ops into ljfitz.fxml 1172 memory schedule #42

Draft
wants to merge 27 commits into
base: feature/fused-ops
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
27 commits
Select commit Hold shift + click to select a range
6850bb7
chore: add pass to lexically schedule for least FM memory
ljfitz Oct 31, 2022
47f96c3
Bump llvm to green commit 4546397e39589f0a6a707218349d1bf65fe54645
gargaroff Oct 27, 2022
6efa18c
Bump LLVM to '4d4ca6c9'
gargaroff Oct 17, 2022
2ab1a74
feat(llvm): bump llvm to green commit 74fb770de9399d7258a8eda974c9361…
gargaroff Nov 11, 2022
5a35415
feat: added the xten_nn dialect
Nov 4, 2022
7d3a898
feat: added print and parse functions
Nov 7, 2022
a5898ef
feat: added a working xten_nn dialect
Nov 7, 2022
a565c9d
feat: added output op and test for subgraph
Nov 7, 2022
0d21cce
style: changed formatting
Nov 7, 2022
0c8b6dd
refactor: removed header
Nov 7, 2022
2b3f26c
refactor: deleted unused enums
Nov 7, 2022
9ff12fc
refactor: deleted unused code
Nov 7, 2022
8f335d2
refactor: changed description of ops
Nov 7, 2022
5a50750
style: removed all dlnn mentions in comments
Nov 7, 2022
d2bc42b
fix: reverted cmake formatting
Nov 8, 2022
b80f719
style: fixed formatting
Nov 8, 2022
5797154
refactor: changed a lit test to only see non standard dialects
Nov 8, 2022
b954dfc
refactor: remove cmake helpers
Nov 8, 2022
0634c4e
refactor: added dialect description
Nov 8, 2022
fe9a665
refactor: refactor for llvm bump
Nov 10, 2022
67c8e10
refactor: rename cmake lib to match xten
Nov 10, 2022
047b408
fix: use right cmake function
Nov 10, 2022
e6eab2e
feat: added simplification pass
Nov 11, 2022
561cba2
feat: added verifier tests
Nov 11, 2022
2b8b1cb
style: fixed comments
Nov 11, 2022
63a7dc3
feat: added generic form test cases for verifier
Nov 11, 2022
ab45de3
refactor: changed error messages in verify methods
Nov 11, 2022
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion include/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -5,4 +5,4 @@
#
# (c) Copyright 2021 Xilinx Inc.

add_subdirectory(xten)
add_subdirectory(xten)
2 changes: 1 addition & 1 deletion include/xten/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -7,4 +7,4 @@

add_subdirectory(Dialect)
add_subdirectory(Conversion)
add_subdirectory(Transform)
add_subdirectory(Transform)
4 changes: 3 additions & 1 deletion include/xten/Dialect/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -4,5 +4,7 @@
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
# (c) Copyright 2021 Xilinx Inc.
# (c) Copyright 2022 Advanced Micro Devices, Inc.

add_subdirectory(XTen)
add_subdirectory(XTen)
add_subdirectory(XTenNN)
2 changes: 1 addition & 1 deletion include/xten/Dialect/XTen/XTenDataflowUtils.h
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@

#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"

using namespace mlir;
Expand Down
1 change: 1 addition & 0 deletions include/xten/Dialect/XTen/XTenOps.h
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@
#include "mlir/Interfaces/SideEffectInterfaces.h"

#include "torch-mlir/Dialect/Torch/IR/TorchDialect.h"
#include "torch-mlir/Dialect/Torch/IR/TorchTypes.h"

#define GET_OP_CLASSES
#include "xten/Dialect/XTen/XTenOps.h.inc"
Expand Down
48 changes: 24 additions & 24 deletions include/xten/Dialect/XTen/XTenOps.td
Original file line number Diff line number Diff line change
Expand Up @@ -50,7 +50,7 @@ def XTen_AddOp: XTen_Op<"add", []>,
}];
}

def XTen_MMOp: XTen_Op<"mm", [NoSideEffect]>,
def XTen_MMOp: XTen_Op<"mm", [Pure]>,
Results<(outs AnyTorchTensorType)> {
let arguments = (
ins AnyTorchTensorType:$x,
Expand Down Expand Up @@ -91,7 +91,7 @@ def XTen_SoftmaxOp: XTen_Op<"softmax", []>,
}];
}

def XTen_GlobalAveragePool2D: XTen_Op<"globalaveragepool2d", [NoSideEffect]>,
def XTen_GlobalAveragePool2D: XTen_Op<"globalaveragepool2d", [Pure]>,
Results<(outs AnyTorchTensorType:$output)> {
let arguments = (
ins AnyTorchTensorType:$input
Expand All @@ -116,7 +116,7 @@ def XTen_NoOp: XTen_Op<"noop", []>,
}];
}

def XTen_Conv2dOp: XTen_Op<"conv2d", [NoSideEffect]>,
def XTen_Conv2dOp: XTen_Op<"conv2d", [Pure]>,
Results<(outs AnyTorchTensorType:$result)> {
let arguments = (
ins AnyTorchTensorType:$input,
Expand All @@ -140,7 +140,7 @@ def XTen_Conv2dOp: XTen_Op<"conv2d", [NoSideEffect]>,
}

// TODO what happens when we have both?
def XTen_PartialConv2dOp: XTen_Op<"partialconv2d", [NoSideEffect]>{
def XTen_PartialConv2dOp: XTen_Op<"partialconv2d", [Pure]>{
let arguments = (
ins AnyTorchTensorType:$input,
AnyTorchOptionalTensorType:$PartialIn,
Expand Down Expand Up @@ -169,7 +169,7 @@ def XTen_PartialConv2dOp: XTen_Op<"partialconv2d", [NoSideEffect]>{
}


def XTen_Conv2dReLUOp: XTen_Op<"conv2d_relu", [NoSideEffect]>,
def XTen_Conv2dReLUOp: XTen_Op<"conv2d_relu", [Pure]>,
Results<(outs AnyTorchTensorType)> {
let arguments = (
ins AnyTorchTensorType:$input,
Expand All @@ -193,7 +193,7 @@ def XTen_Conv2dReLUOp: XTen_Op<"conv2d_relu", [NoSideEffect]>,
}

def XTen_PartialConv2dReLUOp: XTen_Op<"partialconv2d_relu",
[NoSideEffect]> {
[Pure]> {
let arguments = (
ins AnyTorchTensorType:$input,
AnyTorchOptionalTensorType:$PartialIn,
Expand Down Expand Up @@ -221,7 +221,7 @@ def XTen_PartialConv2dReLUOp: XTen_Op<"partialconv2d_relu",
}];
}

def XTen_Conv2dBatchNormReLUOp: XTen_Op<"conv2d_bn_relu", [NoSideEffect]>,
def XTen_Conv2dBatchNormReLUOp: XTen_Op<"conv2d_bn_relu", [Pure]>,
Results<(outs AnyTorchTensorType)> {
let arguments = (
ins AnyTorchTensorType:$input,
Expand Down Expand Up @@ -253,7 +253,7 @@ def XTen_Conv2dBatchNormReLUOp: XTen_Op<"conv2d_bn_relu", [NoSideEffect]>,
}

def XTen_PartialConv2dBatchNormReLUOp: XTen_Op<"partialconv2d_bn_relu",
[NoSideEffect]> {
[Pure]> {
let arguments = (
ins AnyTorchTensorType:$input,
AnyTorchOptionalTensorType:$PartialIn,
Expand Down Expand Up @@ -288,7 +288,7 @@ def XTen_PartialConv2dBatchNormReLUOp: XTen_Op<"partialconv2d_bn_relu",
}];
}

def XTen_ConcatOp: XTen_Op<"concat", [NoSideEffect]>,
def XTen_ConcatOp: XTen_Op<"concat", [Pure]>,
Results<(outs AnyTorchTensorType)> {
let arguments = (
ins Variadic<AnyTorchTensorType>:$inputs,
Expand All @@ -307,7 +307,7 @@ def XTen_ConcatOp: XTen_Op<"concat", [NoSideEffect]>,
}

// TODO Proper verifier for this operation?
def XTen_SplitOp: XTen_Op<"split", [NoSideEffect]> {
def XTen_SplitOp: XTen_Op<"split", [Pure]> {
let arguments = (
ins AnyTorchTensorType:$input,
XTen_AnyScalar:$dim
Expand All @@ -328,7 +328,7 @@ def XTen_SplitOp: XTen_Op<"split", [NoSideEffect]> {
}];
}

def XTen_Conv2dLReLUOp: XTen_Op<"conv2d_lrelu", [NoSideEffect]>,
def XTen_Conv2dLReLUOp: XTen_Op<"conv2d_lrelu", [Pure]>,
Results<(outs AnyTorchTensorType)> {
let arguments = (
ins AnyTorchTensorType:$input,
Expand All @@ -352,7 +352,7 @@ def XTen_Conv2dLReLUOp: XTen_Op<"conv2d_lrelu", [NoSideEffect]>,
}];
}

def XTen_Conv2dLReLUPadOp: XTen_Op<"conv2d_lrelu_pad", [NoSideEffect]>,
def XTen_Conv2dLReLUPadOp: XTen_Op<"conv2d_lrelu_pad", [Pure]>,
Results<(outs AnyTorchTensorType)> {
let arguments = (
ins AnyTorchTensorType:$input,
Expand All @@ -379,7 +379,7 @@ def XTen_Conv2dLReLUPadOp: XTen_Op<"conv2d_lrelu_pad", [NoSideEffect]>,
}


def XTen_Conv2dReLUPadOp: XTen_Op<"conv2d_relu_pad", [NoSideEffect]>,
def XTen_Conv2dReLUPadOp: XTen_Op<"conv2d_relu_pad", [Pure]>,
Results<(outs AnyTorchTensorType)> {
let arguments = (
ins AnyTorchTensorType:$input,
Expand All @@ -405,7 +405,7 @@ def XTen_Conv2dReLUPadOp: XTen_Op<"conv2d_relu_pad", [NoSideEffect]>,
}


def XTen_Conv2dReLUMaxPoolOp: XTen_Op<"conv2d_relu_maxpool", [NoSideEffect]> {
def XTen_Conv2dReLUMaxPoolOp: XTen_Op<"conv2d_relu_maxpool", [Pure]> {
let arguments = (
ins AnyTorchTensorType:$input,
AnyTorchTensorType:$weight,
Expand Down Expand Up @@ -436,7 +436,7 @@ def XTen_Conv2dReLUMaxPoolOp: XTen_Op<"conv2d_relu_maxpool", [NoSideEffect]> {
}];
}

def XTen_Conv2dReLUPadMaxPoolOp: XTen_Op<"conv2d_relu_pad_maxpool", [NoSideEffect]> {
def XTen_Conv2dReLUPadMaxPoolOp: XTen_Op<"conv2d_relu_pad_maxpool", [Pure]> {
let arguments = (
ins AnyTorchTensorType:$input,
AnyTorchTensorType:$weight,
Expand Down Expand Up @@ -470,7 +470,7 @@ def XTen_Conv2dReLUPadMaxPoolOp: XTen_Op<"conv2d_relu_pad_maxpool", [NoSideEffec
}


def XTen_Conv2dLReLUMaxPoolOp: XTen_Op<"conv2d_lrelu_maxpool", [NoSideEffect]> {
def XTen_Conv2dLReLUMaxPoolOp: XTen_Op<"conv2d_lrelu_maxpool", [Pure]> {
let arguments = (
ins AnyTorchTensorType:$input,
AnyTorchTensorType:$weight,
Expand Down Expand Up @@ -502,7 +502,7 @@ def XTen_Conv2dLReLUMaxPoolOp: XTen_Op<"conv2d_lrelu_maxpool", [NoSideEffect]> {
}];
}

def XTen_Conv2dLReLUPadMaxPoolOp: XTen_Op<"conv2d_lrelu_pad_maxpool", [NoSideEffect]> {
def XTen_Conv2dLReLUPadMaxPoolOp: XTen_Op<"conv2d_lrelu_pad_maxpool", [Pure]> {
let arguments = (
ins AnyTorchTensorType:$input,
AnyTorchTensorType:$weight,
Expand Down Expand Up @@ -536,7 +536,7 @@ def XTen_Conv2dLReLUPadMaxPoolOp: XTen_Op<"conv2d_lrelu_pad_maxpool", [NoSideEff
}];
}

def XTen_Conv2dTensorAddOp: XTen_Op<"conv2d_tensoradd", [NoSideEffect]> {
def XTen_Conv2dTensorAddOp: XTen_Op<"conv2d_tensoradd", [Pure]> {
let arguments = (
ins AnyTorchTensorType:$input,
AnyTorchTensorType:$weight,
Expand All @@ -559,7 +559,7 @@ def XTen_Conv2dTensorAddOp: XTen_Op<"conv2d_tensoradd", [NoSideEffect]> {
}];
}

def XTen_Conv2dTensorAddReLUOp: XTen_Op<"conv2d_tensoradd_relu", [NoSideEffect]> {
def XTen_Conv2dTensorAddReLUOp: XTen_Op<"conv2d_tensoradd_relu", [Pure]> {
let arguments = (
ins AnyTorchTensorType:$input,
AnyTorchTensorType:$weight,
Expand All @@ -582,7 +582,7 @@ def XTen_Conv2dTensorAddReLUOp: XTen_Op<"conv2d_tensoradd_relu", [NoSideEffect]>
}];
}

def XTen_Conv2dTensorAddLReLUOp: XTen_Op<"conv2d_tensoradd_lrelu", [NoSideEffect]> {
def XTen_Conv2dTensorAddLReLUOp: XTen_Op<"conv2d_tensoradd_lrelu", [Pure]> {
let arguments = (
ins AnyTorchTensorType:$input,
AnyTorchTensorType:$weight,
Expand All @@ -607,7 +607,7 @@ def XTen_Conv2dTensorAddLReLUOp: XTen_Op<"conv2d_tensoradd_lrelu", [NoSideEffect
}];
}

def XTen_Conv2dTensorAddGlobalAveragePoolOp: XTen_Op<"conv2d_tensoradd_globalaveragepool", [NoSideEffect]> {
def XTen_Conv2dTensorAddGlobalAveragePoolOp: XTen_Op<"conv2d_tensoradd_globalaveragepool", [Pure]> {
let arguments = (
ins AnyTorchTensorType:$input,
AnyTorchTensorType:$weight,
Expand All @@ -631,7 +631,7 @@ def XTen_Conv2dTensorAddGlobalAveragePoolOp: XTen_Op<"conv2d_tensoradd_globalave
}];
}

def XTen_Conv2dTensorAddReLUGlobalAveragePoolOp: XTen_Op<"conv2d_tensoradd_relu_globalaveragepool", [NoSideEffect]> {
def XTen_Conv2dTensorAddReLUGlobalAveragePoolOp: XTen_Op<"conv2d_tensoradd_relu_globalaveragepool", [Pure]> {
let arguments = (
ins AnyTorchTensorType:$input,
AnyTorchTensorType:$weight,
Expand All @@ -655,7 +655,7 @@ def XTen_Conv2dTensorAddReLUGlobalAveragePoolOp: XTen_Op<"conv2d_tensoradd_relu_
}];
}

def XTen_Conv2dTensorAddLReLUGlobalAveragePoolOp: XTen_Op<"conv2d_tensoradd_lrelu_globalaveragepool", [NoSideEffect]> {
def XTen_Conv2dTensorAddLReLUGlobalAveragePoolOp: XTen_Op<"conv2d_tensoradd_lrelu_globalaveragepool", [Pure]> {
let arguments = (
ins AnyTorchTensorType:$input,
AnyTorchTensorType:$weight,
Expand All @@ -681,7 +681,7 @@ def XTen_Conv2dTensorAddLReLUGlobalAveragePoolOp: XTen_Op<"conv2d_tensoradd_lrel
}];
}

def XTen_LinearOp: XTen_Op<"linear", [NoSideEffect]> {
def XTen_LinearOp: XTen_Op<"linear", [Pure]> {
let arguments = (
ins AnyTorchTensorType:$input,
AnyTorchTensorType:$weight,
Expand Down
17 changes: 17 additions & 0 deletions include/xten/Dialect/XTenNN/CMakeLists.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
#
# This file is licensed under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
# (c) Copyright 2022 Advanced Micro Devices, Inc.

# Umbrella target aggregating all XTenNN tablegen generation; subdirectories
# attach their *IncGen targets to it via add_dependencies so consumers can
# depend on a single target.
add_custom_target(XTenNNIncGen)

# NOTE(review): the IR tablegen includes Interfaces/EnclaveOpInterface.td, so
# the interface definitions live in their own subdirectory — confirm the
# generated-file dependencies are wired through XTenNNIncGen.
add_subdirectory(Interfaces)

add_subdirectory(IR)

add_subdirectory(Transforms)



18 changes: 18 additions & 0 deletions include/xten/Dialect/XTenNN/IR/CMakeLists.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
#
# This file is licensed under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
# (c) Copyright 2022 Advanced Micro Devices, Inc.

# All generation below is driven from the ops file, which transitively
# includes the dialect base and interface .td files.
set(LLVM_TARGET_DEFINITIONS XTenNNOps.td)

# Dialect class declarations/definitions (XTenNNBase.h.inc / .cpp.inc) and
# op class declarations/definitions (XTenNNOps.h.inc / .cpp.inc).
mlir_tablegen(XTenNNBase.h.inc -gen-dialect-decls)
mlir_tablegen(XTenNNBase.cpp.inc -gen-dialect-defs)
mlir_tablegen(XTenNNOps.h.inc -gen-op-decls)
mlir_tablegen(XTenNNOps.cpp.inc -gen-op-defs)

# Publish the tablegen target and hook it into the dialect-wide umbrella
# target defined in the parent CMakeLists (XTenNNIncGen).
add_public_tablegen_target(XTenNNDialectIncGen)
add_dependencies(XTenNNIncGen XTenNNDialectIncGen)

# Generate markdown dialect documentation under docs/Dialects/.
add_mlir_doc(XTenNNOps XTenNN Dialects/ -gen-dialect-doc)
17 changes: 17 additions & 0 deletions include/xten/Dialect/XTenNN/IR/XTenNN.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
//===- XTenNN.h ----------------------------------------------*- C++ -*-===//
//
// This file is licensed under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
// (c) Copyright 2022 Advanced Micro Devices, Inc.
//
//===----------------------------------------------------------------------===//
//
// Convenience include for the XTenNN dialect.
//
//===----------------------------------------------------------------------===//

#pragma once

#include "xten/Dialect/XTenNN/IR/XTenNNOps.h"
30 changes: 30 additions & 0 deletions include/xten/Dialect/XTenNN/IR/XTenNNBase.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
//===- XTenNNBase.h --------------------------------------------*- C++ -*-===//
//
// This file is licensed under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
// (c) Copyright 2022 Advanced Micro Devices, Inc.
//
//===----------------------------------------------------------------------===//
//
// Declaration of the XTenNN dialect base.
//
//===----------------------------------------------------------------------===//

#pragma once

#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Math/IR/Math.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Dialect.h"
#include <cstddef>
#include <cstdint>

//===- Generated includes -------------------------------------------------===//

#include "xten/Dialect/XTenNN/IR/XTenNNBase.h.inc"

//===----------------------------------------------------------------------===//
39 changes: 39 additions & 0 deletions include/xten/Dialect/XTenNN/IR/XTenNNBase.td
Original file line number Diff line number Diff line change
@@ -0,0 +1,39 @@
//===-- XTenNNBase.td - XTenNN dialect definitions *- tablegen -*------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
// (c) Copyright 2022 Advanced Micro Devices, Inc.
//
//===----------------------------------------------------------------------===//

#ifndef XTENNN_BASE
#define XTENNN_BASE

include "mlir/Interfaces/SideEffectInterfaces.td"
include "mlir/IR/OpBase.td"

include "xten/Dialect/XTenNN/Interfaces/EnclaveOpInterface.td"

// TableGen record for the `xten_nn` dialect; mlir-tblgen emits the
// ::amd::xten_nn dialect class from this definition.
def XTenNN_Dialect : Dialect {
let name = "xten_nn";
let cppNamespace = "::amd::xten_nn";
let summary = "The XTen Neural Network dialect";
let description = [{
The `xten_nn` dialect provides basic graph-like semantics. It implements a subgraph
operation that can wrap operations into regions and interfaces to handle those.
}];

// Hand-written hook, implemented in the dialect's .cpp file.
// NOTE(review): presumably called from the dialect's initialize() to register
// the generated op list — confirm in the implementation file (not visible
// in this diff).
code extraClassDeclaration = [{
private:
void registerOps();

}];

// The dialect declares no custom types yet, so skip the default generated
// type printer/parser hooks.
let useDefaultTypePrinterParser = 0;

// Generate prefixed accessors (getFoo()/setFoo()) for operands/attributes.
let emitAccessorPrefix = kEmitAccessorPrefix_Prefixed;
}

#endif // XTENNN_BASE
Loading