
FEAT-#128: oneDNN: Implement compilation and execution #129


Merged: 5 commits, merged Jul 9, 2024
8 changes: 4 additions & 4 deletions CMakeLists.txt
@@ -96,12 +96,12 @@ if(GC_ENABLE_BINDINGS_PYTHON)
endif()

set(GC_LIB_LINKED_LIBS
GCPasses
MLIROneDNNGraph
GCJitWrapper
GCCpuRuntime
)
add_library(graph_compiler SHARED ${GC_LIB_SOURCES})
add_mlir_library(graph_compiler SHARED ${GC_LIB_SOURCES})
target_include_directories(graph_compiler PUBLIC ${GC_LIB_INCLUDES})
target_compile_options(graph_compiler PRIVATE -fvisibility=hidden)
target_compile_options(graph_compiler PRIVATE -fvisibility=hidden -fexceptions)
target_link_options(graph_compiler PRIVATE -Wl,--gc-sections)
target_link_libraries(graph_compiler PRIVATE ${GC_LIB_LINKED_LIBS})

50 changes: 33 additions & 17 deletions src/dnnl/JsonParser.cpp
Expand Up @@ -23,15 +23,18 @@

#include "gc/Dialect/OneDNNGraph/OneDNNGraphDialect.h"

#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/ExecutionEngine/OptUtils.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/InitAllPasses.h"

#include "JsonParser.h"

mlir::ModuleOp JsonParser::parse() {
std::vector<size_t> inputPorts;
mlir::ModuleOp
JsonParser::parse(llvm::SmallVector<size_t> &outputIds,
std::unordered_map<std::size_t, Strides> &strides) {
llvm::SmallVector<size_t> inputPorts;
bool hasInputPorts = false;
bool hasOutputPorts = false;
_reader.begin_object();
@@ -57,7 +60,7 @@ mlir::ModuleOp JsonParser::parse() {
readNumArray(inputPorts);
} else if (_str == "output_ports") {
hasOutputPorts = true;
readNumArray(_outputIds);
readNumArray(outputIds);
} else if (_str == "graph") {
_reader.begin_array();
while (_reader.next_array_item()) {
@@ -87,13 +90,13 @@

if (!hasOutputPorts) {
// If output_ports is not specified, using the last operation's outputs.
_outputIds = _uaS;
outputIds.append(_uaS.begin(), _uaS.end());
}

// The function return values.
std::vector<mlir::Value> outputs;
outputs.reserve(_outputIds.size());
for (auto id : _outputIds) {
llvm::SmallVector<mlir::Value> outputs;
outputs.reserve(outputIds.size());
for (auto id : outputIds) {
auto entry = _valueMap.find(id);
if (entry == _valueMap.end()) {
_str = std::to_string(id);
@@ -103,13 +106,25 @@
}
auto ret = _builder.create<mlir::func::ReturnOp>(_loc, outputs);

// Copying the strides for the inputs and outputs.
for (auto &ids : {&_inputIds, &outputIds}) {
for (auto id : *ids) {
auto entry = _strides.find(id);
if (entry != _strides.end()) {
strides[id] = entry->second;
}
}
}

// Creating the final function and moving the entry block.
mlir::OpBuilder builder(_builder.getContext());
auto module = builder.create<mlir::ModuleOp>(_loc);
auto func = builder.create<mlir::func::FuncOp>(
_loc, "main",
_loc, "compute",
builder.getFunctionType(_entryBlock->getArgumentTypes(),
ret->getOperandTypes()));
func->setAttr(mlir::LLVM::LLVMDialect::getEmitCWrapperAttrName(),
mlir::UnitAttr::get(_builder.getContext()));
auto entry = func.addEntryBlock();
_entryBlock->moveBefore(entry);
entry->erase();
@@ -251,7 +266,9 @@ inline mlir::Attribute JsonParser::readAttr() {

mlir::Type JsonParser::readTensorType() {
GetTypeFn getTypeFn = nullptr;
bool strided = false;
_ia64.clear();
_ia642.clear();
_reader.begin_object();

while (_reader.next_object_item(&_str)) {
@@ -267,22 +284,17 @@ mlir::Type JsonParser::readTensorType() {
} else if (_str == "shape") {
readNumArray(_ia64);
} else if (_str == "stride") {
_ia642.clear();
readNumArray(_ia642);
if ((_ia642.size() > 1) ||
((_ia642.size() == 1) &&
(_ia642[0] != std::numeric_limits<int64_t>::min()))) {
// TODO: Add support for strides
throwErr<std::logic_error>("Unsupported stride value: ");
}
} else if (_str == "layout_type") {
_reader.read_string(&_str);
if ((_str != "undef") && (_str != "any")) {
if (_str == "strided") {
strided = true;
} else if ((_str != "undef") && (_str != "any")) {
throwErr<std::logic_error>("Unsupported layout_type: ");
}
} else if (_str == "property_type") {
_reader.read_string(&_str);
if ((_str != "undef") && (_str != "constant")) {
if ((_str != "undef") && (_str != "variable") && (_str != "constant")) {
throwErr<std::logic_error>("Unsupported property_type: ");
}
} else {
@@ -295,6 +307,10 @@
throwErr<std::invalid_argument>("dtype is not specified");
}

if (strided) {
_strides[_uS].assign(_ia642.begin(), _ia642.end());
}

if ((_ia64.size() == 1) &&
(_ia64[0] == std::numeric_limits<int64_t>::min())) {
return mlir::UnrankedTensorType::get(getTypeFn(_builder));
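Note: after this change, readTensorType() records strides for tensors whose layout_type is "strided" and accepts the "variable" property type. Below is a hypothetical logical-tensor entry exercising these new paths; the field names match the keys the parser reads, but the exact serialized schema and the values shown are assumptions for illustration only.

// Hypothetical logical-tensor JSON (illustration only, not from this PR):
//  - "stride" is now recorded per tensor id instead of being rejected,
//  - "layout_type": "strided" sets the local `strided` flag,
//  - "property_type": "variable" no longer raises an error.
static const char *kStridedTensorJson = R"json(
  {
    "id": 3,
    "dtype": "f32",
    "shape": [2, 4],
    "stride": [4, 1],
    "layout_type": "strided",
    "property_type": "variable"
  }
)json";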
39 changes: 23 additions & 16 deletions src/dnnl/JsonParser.h
@@ -43,22 +43,25 @@ using float32_t = float;
#include "mlir/Parser/Parser.h"
#include "mlir/Tools/mlir-opt/MlirOptMain.h"

#include "dnnl_types.h"
#include "graph/utils/json.hpp"

using Strides = llvm::SmallVector<int64_t, DNNL_MAX_NDIMS>;

class JsonParser {
dnnl::impl::graph::utils::json::json_reader_t _reader;
mlir::OpBuilder _builder;
mlir::Location _loc;
mlir::Block *_entryBlock;
std::vector<size_t> &_inputIds;
std::vector<size_t> &_outputIds;
llvm::SmallVector<size_t> &_inputIds;
std::unordered_map<std::size_t, Strides> _strides;
// Function input and operations output values. Used to connect the
// operations inputs and outputs.
std::unordered_map<std::size_t, mlir::Value> _valueMap;
// Temporary value holders, used by the parser
std::vector<mlir::Value> _operands;
std::vector<mlir::Type> _resultTypes;
std::vector<mlir::NamedAttribute> _attributes;
llvm::SmallVector<mlir::Value> _operands;
llvm::SmallVector<mlir::Type> _resultTypes;
llvm::SmallVector<mlir::NamedAttribute> _attributes;
std::string _str;
std::string _str2;
std::size_t _uS;
@@ -70,9 +73,9 @@ class JsonParser {
std::vector<std::float32_t> _fa32;

JsonParser(mlir::MLIRContext &context, std::istream &stream,
std::vector<size_t> &inputIds, std::vector<size_t> &outputIds)
llvm::SmallVector<size_t> &inputIds)
: _reader(&stream), _builder(&context), _loc(_builder.getUnknownLoc()),
_inputIds(inputIds), _outputIds(outputIds), _valueMap(), _operands(),
_inputIds(inputIds), _strides(), _valueMap(), _operands(),
_resultTypes(), _attributes(), _str(), _str2(), _uS(), _i64(), _f32(),
_uaS(), _ia64(), _ia642(), _fa32() {
// Creating a dummy function since we don't know the actual type yet.
@@ -82,7 +85,8 @@ class JsonParser {
_builder.setInsertionPointToStart(_entryBlock);
}

mlir::ModuleOp parse();
mlir::ModuleOp parse(llvm::SmallVector<size_t> &outputIds,
std::unordered_map<std::size_t, Strides> &strides);
void readOp();
mlir::Attribute readAttr();
mlir::Type readTensorType();
@@ -120,11 +124,12 @@ class JsonParser {
}
}

template <typename T> inline void readNumArray(std::vector<T> &vec) {
template <typename T, template <typename...> class Container, typename... Any>
inline void readNumArray(Container<T, Any...> &c) {
_reader.begin_array();
for (T value; _reader.next_array_item();) {
_reader.read_number(&value);
vec.push_back(value);
c.push_back(value);
}
}

@@ -175,14 +180,16 @@
* @param json JSON string containing the oneDNN graph.
* @param inputIds Input tensor IDs are added to this vector.
* @param outputIds Output tensor IDs are added to this vector.
* @param strides Strides for each tensor are added to this map.
* @return The resulting MLIR module.
*/
static mlir::ModuleOp parse(mlir::MLIRContext &context,
const std::string_view &json,
std::vector<size_t> &inputIds,
std::vector<size_t> &outputIds) {
static mlir::ModuleOp
parse(mlir::MLIRContext &context, const std::string_view &json,
llvm::SmallVector<size_t> &inputIds,
llvm::SmallVector<size_t> &outputIds,
std::unordered_map<std::size_t, Strides> &strides) {
std::istringstream stream(json.data());
JsonParser parser(context, stream, inputIds, outputIds);
return parser.parse();
JsonParser parser(context, stream, inputIds);
return parser.parse(outputIds, strides);
}
};
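For orientation, a minimal caller-side sketch of the updated entry point (not part of this PR): the caller now supplies the output-id vector and a map that receives per-tensor strides, and the returned module exposes a single "compute" function carrying the C-wrapper attribute. The helper name compileGraph, the JSON payload, and the assumption that dialect registration is handled by the surrounding driver are all illustrative.

#include <iostream>
#include <string_view>
#include <unordered_map>

#include "mlir/IR/MLIRContext.h"

#include "JsonParser.h"

// Sketch only: assumes the required dialects (func, OneDNNGraph, LLVM) have
// already been registered on the context by the surrounding driver code.
static void compileGraph(mlir::MLIRContext &context, std::string_view json) {
  llvm::SmallVector<size_t> inputIds;   // input tensor IDs are added here
  llvm::SmallVector<size_t> outputIds;  // output tensor IDs (or the last
                                        // op's outputs if none are given)
  std::unordered_map<std::size_t, Strides> strides; // id -> strides of the
                                                    // "strided" inputs/outputs

  mlir::ModuleOp module =
      JsonParser::parse(context, json, inputIds, outputIds, strides);

  // The module holds one function named "compute" with the C-wrapper
  // attribute set, ready for lowering and JIT execution.
  module.dump();

  for (const auto &[id, s] : strides)
    std::cout << "tensor " << id << " has " << s.size() << " stride values\n";
}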