Skip to content

Commit a36a182

Browse files
committed
refactor
1 parent 741908f commit a36a182

37 files changed

+3462
-277
lines changed

.gitignore

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,2 +1,4 @@
1-
.ipynb_checkpoints/
2-
*.so
1+
.ipynb_checkpoints
2+
.pkl_memoize_py3
3+
main
4+
.DS_Store

onnx-mlir/Release/lib/libcruntime.a

65.9 KB
Binary file not shown.

onnx-mlir/include/CMakeLists.txt

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,6 @@
# SPDX-License-Identifier: Apache-2.0

# Public headers live in the onnx-mlir/ subtree; recurse into it first.
add_subdirectory(onnx-mlir)

# Install the top-level public API headers side by side.
install(FILES OnnxMlirCompiler.h OnnxMlirRuntime.h DESTINATION include)

onnx-mlir/include/OnnxMlirCompiler.h

Lines changed: 132 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,132 @@
/*
 * SPDX-License-Identifier: Apache-2.0
 */

//===------- OnnxMlirCompiler.h - ONNX-MLIR Compiler API Declarations -----===//
//
// This file contains declaration of onnx-mlir compiler functionality
// exported from the OnnxMlirCompiler library.
//
//===----------------------------------------------------------------------===//

#ifndef ONNX_MLIR_ONNXMLIRCOMPILER_H
#define ONNX_MLIR_ONNXMLIRCOMPILER_H

#include <onnx-mlir/Compiler/OMCompilerTypes.h>
#ifdef __cplusplus
// <string> is a C++-only header; it must stay inside the __cplusplus guard,
// otherwise C translation units that include this API header fail to compile.
#include <cstdint>
#include <string>
#else
#include <stdint.h>
#endif // #ifdef __cplusplus

#ifdef ONNX_MLIR_BUILT_AS_STATIC
#define ONNX_MLIR_EXPORT
#else
#ifdef _MSC_VER
#ifdef OnnxMlirCompiler_EXPORTS
/* We are building this library */
#define ONNX_MLIR_EXPORT __declspec(dllexport)
#else
/* We are using this library */
#define ONNX_MLIR_EXPORT __declspec(dllimport)
#endif
#else
#define ONNX_MLIR_EXPORT __attribute__((__visibility__("default")))
#endif
#endif

#ifdef __cplusplus
extern "C" {
namespace onnx_mlir {
#endif

/*!
 * C interface to compile an onnx model from a file via onnx-mlir command.
 * This interface is thread safe, and does not take any flags from the
 * current environment. All flags are passed by using the flags parameter,
 * including the "-o output-file-name" option or the "-EmitXXX" options. All
 * options that are available to onnx-mlir are also available here.
 *
 * This call relies on executing the onnx-mlir compiler. The user can override
 * its default location by using the ONNX_MLIR_BIN_PATH environment variable.
 *
 * When generating libraries or jar files, the compiler will link in
 * lightweight runtimes / jar files. If these libraries / jar files are not in
 * the system wide directory (typically /usr/local/lib), the user can override
 * the default location using the ONNX_MLIR_LIBRARY_PATH environment variable.
 *
 * @param inputFilename File name pointing onnx model protobuf or MLIR.
 * Name may include a path, and must include the file name and its extension.
 *
 * @param flags A char * contains all the options provided to compile the
 * model.
 *
 * @param outputFilename Output file name of the compiled output for the given
 * emission target. User is responsible for freeing the string.
 *
 * @param errorMessage Output error message, if any. User is responsible for
 * freeing the string.
 *
 * @return 0 on success or OnnxMlirCompilerErrorCodes on failure.
 */
ONNX_MLIR_EXPORT int64_t omCompileFromFile(const char *inputFilename,
    const char *flags, char **outputFilename, char **errorMessage);

/*!
 * Compile an onnx model from an ONNX protobuf array. This method is not thread
 * safe, and borrows the current compiler options currently defined in this
 * process. When generating libraries or jar files, the compiler will link in
 * lightweight runtimes / jar files. If these libraries / jar files are not in
 * the system wide directory (typically /usr/local/lib), the user can override
 * the default location using the ONNX_MLIR_LIBRARY_PATH environment variable.
 *
 * @param inputBuffer ONNX protobuf array.
 * @param bufferSize Size of ONNX protobuf array.
 * @param outputBaseName File name without extension to write output.
 * Name may include a path, must include the file name, and should not include
 * an extension.
 * @param emissionTarget Target format to compile to.
 * @param outputFilename Output file name of the compiled output for the given
 * emission target. User is responsible for freeing the string.
 * @param errorMessage Error message, if any. User is responsible for freeing
 * the string.
 * @return 0 on success or OnnxMlirCompilerErrorCodes failure. User is
 * responsible for freeing the string.
 */
ONNX_MLIR_EXPORT int64_t omCompileFromArray(const void *inputBuffer,
    int64_t bufferSize, const char *outputBaseName,
    EmissionTargetType emissionTarget, char **outputFilename,
    char **errorMessage);

/*!
 * Compute the file name of the compiled output for the given
 * emission target. User is responsible for freeing the string.
 *
 * @param inputFilename File name pointing onnx model protobuf or MLIR.
 * Name may include a path, and must include the file name and its extension.
 * @param flags A char * contains all the options provided to compile the
 * model.
 * @return string containing the file name. User is responsible for freeing the
 * string.
 */
ONNX_MLIR_EXPORT char *omCompileOutputFileName(
    const char *inputFilename, const char *flags);

/*!
 * Compute the model tag from the given compile options.
 * User is responsible for freeing the string.
 *
 * @param flags A char * contains all the options provided to compile the
 * model.
 * @return string containing the model tag. User is responsible for freeing the
 * string.
 */
ONNX_MLIR_EXPORT char *omCompileModelTag(const char *flags);

#ifdef __cplusplus
} // namespace onnx_mlir
} // extern C
#endif

#endif

onnx-mlir/include/OnnxMlirRuntime.h

Lines changed: 180 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,180 @@
/*
 * SPDX-License-Identifier: Apache-2.0
 */

//===------- OnnxMlirRuntime.h - ONNX-MLIR Runtime API Declarations -------===//
//
// Copyright 2019-2023 The IBM Research Authors.
//
// =============================================================================
//
// This file contains declaration of external OMTensor data structures and
// helper functions.
//
//===----------------------------------------------------------------------===//
#ifndef ONNX_MLIR_ONNXMLIRRUNTIME_H
#define ONNX_MLIR_ONNXMLIRRUNTIME_H

#ifdef __cplusplus
#include <cstdint>
#else
#include <stdbool.h>
#include <stdint.h>
#endif

#include <onnx-mlir/Runtime/OMEntryPoint.h>
#include <onnx-mlir/Runtime/OMInstrument.h>
#include <onnx-mlir/Runtime/OMSignature.h>
#include <onnx-mlir/Runtime/OMTensor.h>
#include <onnx-mlir/Runtime/OMTensorList.h>

/*! \mainpage ONNX-MLIR Runtime API documentation
 *
 * \section intro_sec Introduction
 *
 * ONNX-MLIR project comes with an executable `onnx-mlir` capable
 * of compiling onnx models to a shared library. In this documentation, we
 * demonstrate how to interact programmatically with the compiled
 * shared library using ONNX-MLIR's Runtime API.
 *
 * \section c-runtime-api C Runtime API
 *
 * \subsection data-structures Data Structures
 *
 * `OMTensor` is the data structure used to describe the runtime information
 * (rank, shape, data type, etc) associated with a tensor input or output.
 *
 * `OMTensorList` is the data structure used to hold a list of pointers to
 * OMTensor so that they can be passed into and out of the compiled model as
 * inputs and outputs.
 *
 * `OMEntryPoint` is the data structure used to return all entry point names
 * in a model. These entry point names are the symbols of the inference functions
 * in the model.
 *
 * `OMSignature` is the data structure used to return the output signature of
 * the given entry point as a JSON string.
 *
 * \subsection model-entry-point-signature Model Entry Point Signature
 *
 * All compiled models will have the same exact C function signature equivalent
 * to:
 *
 * ```c
 * OMTensorList* run_main_graph(OMTensorList*);
 * ```
 *
 * Intuitively, the model takes a list of tensors as input and returns a list of
 * tensors as output.
 *
 * \subsection invoke-models-using-c-runtime-api Invoke Models Using C Runtime
 * API
 *
 * We demonstrate using the API functions to run a simple ONNX model consisting
 * of an add operation. To create such an onnx model, use this
 * <a href="gen_add_onnx.py" target="_blank"><b>python script</b></a>
 *
 * To compile the above model, run `onnx-mlir add.onnx` and a binary library
 * "add.so" should appear. We can use the following C code to call into the
 * compiled function computing the sum of two inputs:
 *
 * ```c
 * #include <OnnxMlirRuntime.h>
 * #include <stdio.h>
 *
 * OMTensorList *run_main_graph(OMTensorList *);
 *
 * OMTensorList *create_input_list() {
 *   // Shared shape & rank.
 *   int64_t shape[] = {3, 2};
 *   int64_t num_elements = shape[0] * shape[1];
 *   int64_t rank = 2;
 *
 *   // Construct float arrays filled with 1s or 2s.
 *   float *x1Data = (float *)malloc(sizeof(float) * num_elements);
 *   for (int i = 0; i < num_elements; i++)
 *     x1Data[i] = 1.0;
 *   float *x2Data = (float *)malloc(sizeof(float) * num_elements);
 *   for (int i = 0; i < num_elements; i++)
 *     x2Data[i] = 2.0;
 *
 *   // Use omTensorCreateWithOwnership "true" so float arrays are automatically
 *   // freed when the Tensors are destroyed.
 *   OMTensor *x1 = omTensorCreateWithOwnership(x1Data, shape, rank, ONNX_TYPE_FLOAT, true);
 *   OMTensor *x2 = omTensorCreateWithOwnership(x2Data, shape, rank, ONNX_TYPE_FLOAT, true);
 *
 *   // Construct a TensorList using the Tensors
 *   OMTensor *list[2] = {x1, x2};
 *   return omTensorListCreate(list, 2);
 * }
 *
 * int main() {
 *   // Generate input TensorList
 *   OMTensorList *input_list = create_input_list();
 *
 *   // Call the compiled onnx model function.
 *   OMTensorList *output_list = run_main_graph(input_list);
 *   if (!output_list) {
 *     // May inspect errno to get info about the error.
 *     return 1;
 *   }
 *
 *   // Get the first tensor from output list.
 *   OMTensor *y = omTensorListGetOmtByIndex(output_list, 0);
 *   float *outputPtr = (float *) omTensorGetDataPtr(y);
 *
 *   // Print its content, should be all 3.
 *   for (int i = 0; i < 6; i++)
 *     printf("%f ", outputPtr[i]);
 *   printf("\n");
 *
 *   // Destroy the list and the tensors inside of it.
 *   // Use omTensorListDestroyShallow if you only want to destroy the list
 *   // itself.
 *   omTensorListDestroy(input_list);
 *   omTensorListDestroy(output_list);
 *   return 0;
 * }
 * ```
 *
 * Compile with `gcc main.c add.so -o add`, you should see an executable `add`
 * appearing. Run it, and the output should be:
 *
 * ```
 * 3.000000 3.000000 3.000000 3.000000 3.000000 3.000000
 * ```
 * Exactly as it should be.
 *
 * \subsection freeing-tensor-memory Freeing Tensor Memory
 *
 * In general, if a caller creates a tensor object (omTensorCreate), they are
 * responsible for deallocating the data buffer separately after the tensor is
 * destroyed. If onnx-mlir creates the tensor (run_main_graph), then the
 * tensor object owns the data buffer and it is freed automatically when the
 * tensor is destroyed.
 *
 * This default behavior can be changed. When creating a tensor, a user may use
 * omTensorCreateWithOwnership to explicitly set data buffer ownership.
 * Additionally, after a tensor is created, omTensorSetOwning can be used to
 * change the ownership setting.
 *
 * When omTensorDestroy is called, if the ownership flag is set to "true",
 * then the destruction of the tensor will also free any associated data buffer
 * memory. If the ownership flag is set to "false", then the user is responsible
 * for freeing the data buffer memory after destroying the tensor.
 *
 * For tensor list objects, when omTensorListDestroy is called, omTensorDestroy
 * is called on all tensors the list contained. The data buffer of each tensor
 * is freed based on each tensor's ownership setting.
 *
 * To destroy a TensorList without automatically destroying the tensors it
 * contained, use omTensorListDestroyShallow.
 *
 * \subsection reference Reference
 *
 * For full reference to available C Runtime API, refer to
 * `include/onnx-mlir/Runtime/OMTensor.h` and
 * `include/onnx-mlir/Runtime/OMTensorList.h`.
 *
 */

#endif // ONNX_MLIR_ONNXMLIRRUNTIME_H
Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,4 @@
# SPDX-License-Identifier: Apache-2.0

# Recurse into the two public API subtrees. Runtime first, then Compiler;
# NOTE(review): assumed order-independent (install-only dirs) — confirm.
add_subdirectory(Runtime)
add_subdirectory(Compiler)
Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,4 @@
# SPDX-License-Identifier: Apache-2.0

# Install the public Compiler API headers into the onnx-mlir include tree.
install(FILES OMCompilerTypes.h OMCompilerMacros.h
        DESTINATION include/onnx-mlir/Compiler)
Lines changed: 29 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,29 @@
/*
 * SPDX-License-Identifier: Apache-2.0
 */

//====------- OMCompilerMacros.h - Compiler abstraction support -----------===//
//
// Copyright 2022 The IBM Research Authors.
//
// =============================================================================
//
// This file defines several macros, which allows use of compiler-specific
// features in a way that remains portable. This header can be included from
// either C or C++.
//
//===----------------------------------------------------------------------===//

#ifndef ONNX_MLIR_COMPILER_MACROS_H
#define ONNX_MLIR_COMPILER_MACROS_H

/// OM_EXTERNAL_VISIBILITY - classes, functions, and variables marked with this
/// keyword will be made public and visible outside of any shared library they
/// are linked in to.
// NOTE(review): on _WIN32 this always expands to dllexport, even in consuming
// translation units (no dllimport branch like ONNX_MLIR_EXPORT has).
// Presumably headers using it are only compiled while building the library —
// confirm before reusing it in public headers.
#if defined(_WIN32)
#define OM_EXTERNAL_VISIBILITY __declspec(dllexport)
#else
#define OM_EXTERNAL_VISIBILITY
#endif

#endif // ONNX_MLIR_COMPILER_MACROS_H

0 commit comments

Comments
 (0)