diff --git a/build/Dependencies.props b/build/Dependencies.props
index 46c90c481e..6ca726efe5 100644
--- a/build/Dependencies.props
+++ b/build/Dependencies.props
@@ -16,7 +16,7 @@
3.10.1
2.2.3
2.1.0
- 1.1.2
+ 1.2
0.0.0.9
2.1.3
4.5.0
diff --git a/docs/samples/Microsoft.ML.Samples/Microsoft.ML.Samples.csproj b/docs/samples/Microsoft.ML.Samples/Microsoft.ML.Samples.csproj
index 812114e7a5..5951c4bbd1 100644
--- a/docs/samples/Microsoft.ML.Samples/Microsoft.ML.Samples.csproj
+++ b/docs/samples/Microsoft.ML.Samples/Microsoft.ML.Samples.csproj
@@ -968,6 +968,7 @@
+
diff --git a/pkg/Microsoft.ML.OnnxTransformer/Microsoft.ML.OnnxTransformer.nupkgproj b/pkg/Microsoft.ML.OnnxTransformer/Microsoft.ML.OnnxTransformer.nupkgproj
index c924ef4aba..3c7d9f2ccd 100644
--- a/pkg/Microsoft.ML.OnnxTransformer/Microsoft.ML.OnnxTransformer.nupkgproj
+++ b/pkg/Microsoft.ML.OnnxTransformer/Microsoft.ML.OnnxTransformer.nupkgproj
@@ -8,7 +8,7 @@
-
+
diff --git a/src/Microsoft.ML.OnnxTransformer/Microsoft.ML.OnnxTransformer.csproj b/src/Microsoft.ML.OnnxTransformer/Microsoft.ML.OnnxTransformer.csproj
index d2f51a9429..7612f974ea 100644
--- a/src/Microsoft.ML.OnnxTransformer/Microsoft.ML.OnnxTransformer.csproj
+++ b/src/Microsoft.ML.OnnxTransformer/Microsoft.ML.OnnxTransformer.csproj
@@ -9,7 +9,7 @@
-
+
diff --git a/src/Microsoft.ML.OnnxTransformer/OnnxUtils.cs b/src/Microsoft.ML.OnnxTransformer/OnnxUtils.cs
index a0fe800860..ca9110260c 100644
--- a/src/Microsoft.ML.OnnxTransformer/OnnxUtils.cs
+++ b/src/Microsoft.ML.OnnxTransformer/OnnxUtils.cs
@@ -179,9 +179,19 @@ public OnnxModel(string modelFile, int? gpuDeviceId = null, bool fallbackToCpu =
if (gpuDeviceId != null)
{
- // The onnxruntime v1.0 currently does not support running on the GPU on all of ML.NET's supported platforms.
- // This code path will be re-enabled when there is appropriate support in onnxruntime
- throw new NotSupportedException("Running Onnx models on a GPU is temporarily not supported!");
+ try
+ {
+ _session = new InferenceSession(modelFile,
+ SessionOptions.MakeSessionOptionWithCudaProvider(gpuDeviceId.Value));
+ }
+ catch (OnnxRuntimeException)
+ {
+ if (fallbackToCpu)
+ _session = new InferenceSession(modelFile);
+ else
+ // If called from OnnxTransform, the exception is caught there and rethrown
+ throw;
+ }
}
else
{
diff --git a/test/Microsoft.ML.Functional.Tests/Microsoft.ML.Functional.Tests.csproj b/test/Microsoft.ML.Functional.Tests/Microsoft.ML.Functional.Tests.csproj
index 2e31450e8d..624384d9b5 100644
--- a/test/Microsoft.ML.Functional.Tests/Microsoft.ML.Functional.Tests.csproj
+++ b/test/Microsoft.ML.Functional.Tests/Microsoft.ML.Functional.Tests.csproj
@@ -40,6 +40,7 @@
+
diff --git a/test/Microsoft.ML.OnnxTransformerTest/Microsoft.ML.OnnxTransformerTest.csproj b/test/Microsoft.ML.OnnxTransformerTest/Microsoft.ML.OnnxTransformerTest.csproj
index bc16e60ad3..4dcc9cdfc9 100644
--- a/test/Microsoft.ML.OnnxTransformerTest/Microsoft.ML.OnnxTransformerTest.csproj
+++ b/test/Microsoft.ML.OnnxTransformerTest/Microsoft.ML.OnnxTransformerTest.csproj
@@ -10,6 +10,7 @@
+
diff --git a/test/Microsoft.ML.Tests/Microsoft.ML.Tests.csproj b/test/Microsoft.ML.Tests/Microsoft.ML.Tests.csproj
index cff0b66736..a0ac494341 100644
--- a/test/Microsoft.ML.Tests/Microsoft.ML.Tests.csproj
+++ b/test/Microsoft.ML.Tests/Microsoft.ML.Tests.csproj
@@ -49,6 +49,7 @@
+