Updated Tensorflow.Net to 0.70.2 with Tensorflow 2.7.0. #7472

Open · wants to merge 2 commits into main
5 changes: 3 additions & 2 deletions eng/Versions.props
@@ -67,9 +67,9 @@
<ParquetDotNetVersion>2.1.3</ParquetDotNetVersion>
<PlotlyNETCSharpVersion>0.11.1</PlotlyNETCSharpVersion>
<SharpZipLibVersion>1.4.2</SharpZipLibVersion>
<TensorflowDotNETVersion>0.20.1</TensorflowDotNETVersion>
<TensorflowDotNETVersion>0.70.2</TensorflowDotNETVersion>
<TensorFlowMajorVersion>2</TensorFlowMajorVersion>
<TensorFlowVersion>2.3.1</TensorFlowVersion>
<TensorFlowVersion>2.7.0</TensorFlowVersion>
<TorchSharpPyBridgeVersion>1.4.1</TorchSharpPyBridgeVersion>
<AutoGenVersion>0.2.3</AutoGenVersion>
<SemanticKernelVersion>1.48.0</SemanticKernelVersion>
@@ -90,6 +90,7 @@
<BenchmarkDotNetVersion>0.13.12</BenchmarkDotNetVersion>
<DotNetRuntime60Version>6.0.36</DotNetRuntime60Version>
<DotNetRuntime80Version>8.0.16</DotNetRuntime80Version>
<DotNetRuntime90Version>9.0.5</DotNetRuntime90Version>
<AwesomeAssertionsVersion>8.1.0</AwesomeAssertionsVersion>
<MicrosoftCodeAnalysisTestingVersion>1.1.2</MicrosoftCodeAnalysisTestingVersion>
<MicrosoftDotNetBuildTasksTemplatingVersion>9.0.0-beta.24212.4</MicrosoftDotNetBuildTasksTemplatingVersion>
6 changes: 4 additions & 2 deletions global.json
@@ -4,11 +4,13 @@
"runtimes": {
"dotnet": [
"$(DotNetRuntime60Version)",
"$(DotNetRuntime80Version)"
"$(DotNetRuntime80Version)",
"$(DotNetRuntime90Version)"
],
"dotnet/x86": [
"$(DotNetRuntime60Version)",
"$(DotNetRuntime80Version)"
"$(DotNetRuntime80Version)",
"$(DotNetRuntime90Version)"
]
}
},
21 changes: 21 additions & 0 deletions src/Microsoft.ML.Core/Utilities/ArrayUtils.cs
@@ -100,5 +100,26 @@ public static int EnsureSize<T>(ref T[] array, int min, int max, bool keepOld, o
resized = true;
return newSize;
}

public static int[] CastLongArrayToIntArray(long[] source)
{
if (source == null)
throw new ArgumentNullException(nameof(source));

int[] result = new int[source.Length];

for (int i = 0; i < source.Length; i++)
{
long value = source[i];
if (value > int.MaxValue || value < int.MinValue)
{
throw new OverflowException($"Value at index {i} ({value}) cannot be safely cast to int.");
}

result[i] = (int)value;
}

return result;
}
}
}
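Note: the new CastLongArrayToIntArray helper exists so TensorFlow.NET's long-based shape dimensions can be narrowed to the int dimensions ML.NET uses, failing loudly instead of truncating. A minimal usage sketch, assuming the caller can see the internal ArrayUtils class; the sample values are illustrative:

long[] dims = { 1, 224, 224, 3 };
int[] narrowed = ArrayUtils.CastLongArrayToIntArray(dims);   // { 1, 224, 224, 3 }

// A dimension outside the int range throws instead of silently wrapping:
// ArrayUtils.CastLongArrayToIntArray(new[] { (long)int.MaxValue + 1 });   // OverflowException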
18 changes: 18 additions & 0 deletions src/Microsoft.ML.DataView/VectorType.cs
@@ -67,6 +67,24 @@ public VectorDataViewType(PrimitiveDataViewType itemType, params int[] dimension
Size = ComputeSize(Dimensions);
}

/// <summary>
/// Constructs a potentially multi-dimensional vector type.
/// </summary>
/// <param name="itemType">The type of the items contained in the vector.</param>
/// <param name="dimensions">The dimensions. Note that, like <see cref="Dimensions"/>, must be non-empty, with all
/// non-negative values. Also, because <see cref="Size"/> is the product of <see cref="Dimensions"/>, the result of
/// multiplying all these values together must not overflow <see cref="int"/>.</param>
public VectorDataViewType(PrimitiveDataViewType itemType, params long[] dimensions)
: base(GetRawType(itemType))
{
Contracts.CheckParam(ArrayUtils.Size(dimensions) > 0, nameof(dimensions));
Contracts.CheckParam(dimensions.All(d => d >= 0), nameof(dimensions));

ItemType = itemType;
Dimensions = ArrayUtils.CastLongArrayToIntArray(dimensions).ToImmutableArray();
Size = ComputeSize(Dimensions);
}

/// <summary>
/// Constructs a potentially multi-dimensional vector type.
/// </summary>
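Note: the added constructor overload accepts the long[] dimensions that TensorFlow.NET 0.70.x reports, delegating the overflow check to CastLongArrayToIntArray. A hedged usage sketch; the item type and dimension values are illustrative:

using Microsoft.ML.Data;

long[] tfDims = { 28, 28, 1 };   // dimensions as surfaced by a TF shape
var vectorType = new VectorDataViewType(NumberDataViewType.Single, tfDims);
// vectorType.Size == 784; the dimensions are narrowed to int internally.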
4 changes: 2 additions & 2 deletions src/Microsoft.ML.TensorFlow/TensorTypeExtensions.cs
@@ -25,7 +25,7 @@ public static void ToScalar<T>(this Tensor tensor, ref T dst) where T : unmanage
return;
}

if (typeof(T).as_dtype() != tensor.dtype)
if (typeof(T).as_tf_dtype() != tensor.dtype)
throw new NotSupportedException();

unsafe
@@ -37,7 +37,7 @@ public static void ToScalar<T>(this Tensor tensor, ref T dst) where T : unmanage

public static void CopyTo<T>(this Tensor tensor, Span<T> values) where T : unmanaged
{
if (typeof(T).as_dtype() != tensor.dtype)
if (typeof(T).as_tf_dtype() != tensor.dtype)
throw new NotSupportedException();

unsafe
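Note: as_dtype() is renamed to as_tf_dtype() in newer TensorFlow.NET releases; the guard still maps the CLR element type to a TF_DataType and rejects mismatches. A minimal sketch of the check these extensions perform; the tensor construction is illustrative:

using Tensorflow;
using static Tensorflow.Binding;

var tensor = tf.constant(new float[] { 1f, 2f, 3f });          // float32 tensor
bool matches = typeof(float).as_tf_dtype() == tensor.dtype;    // true, so CopyTo<float> is allowed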
62 changes: 31 additions & 31 deletions src/Microsoft.ML.TensorFlow/TensorflowTransform.cs
@@ -16,8 +16,8 @@
using Microsoft.ML.Runtime;
using Microsoft.ML.TensorFlow;
using Microsoft.ML.Transforms;
using NumSharp;
using Tensorflow;
using Tensorflow.NumPy;
using static Microsoft.ML.TensorFlow.TensorFlowUtils;
using static Tensorflow.Binding;
using Utils = Microsoft.ML.Internal.Utilities.Utils;
@@ -51,7 +51,7 @@ public sealed class TensorFlowTransformer : RowToRowTransformerBase, IDisposable
internal readonly DataViewType[] OutputTypes;
internal readonly TF_DataType[] TFOutputTypes;
internal readonly TF_DataType[] TFInputTypes;
internal readonly TensorShape[] TFInputShapes;
internal readonly Shape[] TFInputShapes;
internal readonly (Operation, int)[] TFInputOperations;
internal readonly (Operation, int)[] TFOutputOperations;
internal TF_Output[] TFInputNodes;
@@ -212,14 +212,14 @@ internal TensorFlowTransformer(IHostEnvironment env, TensorFlowEstimator.Options
env.CheckValue(options, nameof(options));
}

private static ITensorValueGetter CreateTensorValueGetter<T>(DataViewRow input, bool isVector, int colIndex, TensorShape tfShape)
private static ITensorValueGetter CreateTensorValueGetter<T>(DataViewRow input, bool isVector, int colIndex, Shape tfShape)
{
if (isVector)
return new TensorValueGetterVec<T>(input, colIndex, tfShape);
return new TensorValueGetter<T>(input, colIndex, tfShape);
}

private static ITensorValueGetter CreateTensorValueGetter(DataViewRow input, TF_DataType tfType, bool isVector, int colIndex, TensorShape tfShape)
private static ITensorValueGetter CreateTensorValueGetter(DataViewRow input, TF_DataType tfType, bool isVector, int colIndex, Shape tfShape)
{
var type = Tf2MlNetType(tfType);
return Utils.MarshalInvoke(CreateTensorValueGetter<int>, type.RawType, input, isVector, colIndex, tfShape);
@@ -230,7 +230,7 @@ private static ITensorValueGetter[] GetTensorValueGetters(
int[] inputColIndices,
bool[] isInputVector,
TF_DataType[] tfInputTypes,
TensorShape[] tfInputShapes)
Shape[] tfInputShapes)
{
var srcTensorGetters = new ITensorValueGetter[inputColIndices.Length];
for (int i = 0; i < inputColIndices.Length; i++)
@@ -331,10 +331,10 @@ private static (Operation, int) GetOperationFromName(string operation, Session s
return (session.graph.OperationByName(operation), 0);
}

internal static (TF_DataType[] tfInputTypes, TensorShape[] tfInputShapes, (Operation, int)[]) GetInputInfo(IHost host, Session session, string[] inputs, int batchSize = 1)
internal static (TF_DataType[] tfInputTypes, Shape[] tfInputShapes, (Operation, int)[]) GetInputInfo(IHost host, Session session, string[] inputs, int batchSize = 1)
{
var tfInputTypes = new TF_DataType[inputs.Length];
var tfInputShapes = new TensorShape[inputs.Length];
var tfInputShapes = new Shape[inputs.Length];
var tfInputOperations = new (Operation, int)[inputs.Length];

int index = 0;
@@ -351,15 +351,15 @@ internal static (TF_DataType[] tfInputTypes, TensorShape[] tfInputShapes, (Opera
throw host.ExceptParam(nameof(session), $"Input type '{tfInputType}' of input column '{input}' is not supported in TensorFlow");

tfInputTypes[index] = tfInputType;
tfInputShapes[index] = ((Tensor)inputTensor).TensorShape;
tfInputShapes[index] = ((Tensor)inputTensor).shape;
tfInputOperations[index] = (inputTensor, inputTensorIndex);
index++;
}

return (tfInputTypes, tfInputShapes, tfInputOperations);
}

internal static TensorShape GetTensorShape(TF_Output output, Graph graph, Status status = null)
internal static Shape GetTensorShape(TF_Output output, Graph graph, Status status = null)
{
if (graph == IntPtr.Zero)
throw new ObjectDisposedException(nameof(graph));
@@ -370,12 +370,12 @@ internal static TensorShape GetTensorShape(TF_Output output, Graph graph, Status
cstatus.Check();

if (n == -1)
return new TensorShape(new int[0]);
return new Shape(new int[0]);

var dims = new long[n];
c_api.TF_GraphGetTensorShape(graph, output, dims, dims.Length, cstatus.Handle);
cstatus.Check();
return new TensorShape(dims.Select(x => (int)x).ToArray());
return new Shape(dims.Select(x => (int)x).ToArray());
}

internal static (TF_DataType[] tfOutputTypes, DataViewType[] outputTypes, (Operation, int)[]) GetOutputInfo(IHost host, Session session, string[] outputs, bool treatOutputAsBatched)
@@ -404,10 +404,10 @@ internal static (TF_DataType[] tfOutputTypes, DataViewType[] outputTypes, (Opera
// This is the work around in absence of reshape transformer.
var idims = shape.dims;

int[] dims = idims;
long[] dims = idims;
if (treatOutputAsBatched)
{
dims = shape.ndim > 0 ? idims.Skip(idims[0] == -1 ? 1 : 0).ToArray() : new int[0];
dims = shape.ndim > 0 ? idims.Skip(idims[0] == -1 ? 1 : 0).ToArray() : new long[0];
}
for (int j = 0; j < dims.Length; j++)
dims[j] = dims[j] == -1 ? 0 : dims[j];
Expand Down Expand Up @@ -517,7 +517,7 @@ public void Dispose()

if (Session != null && Session != IntPtr.Zero)
{
Session.close(); // invoked Dispose()
Session.Dispose();
}
}
finally
@@ -536,7 +536,7 @@ private sealed class Mapper : MapperBase
private readonly TensorFlowTransformer _parent;
private readonly int[] _inputColIndices;
private readonly bool[] _isInputVector;
private readonly TensorShape[] _fullySpecifiedShapes;
private readonly Shape[] _fullySpecifiedShapes;
private readonly ConcurrentBag<Runner> _runners;

public Mapper(TensorFlowTransformer parent, DataViewSchema inputSchema) :
@@ -546,7 +546,7 @@ public Mapper(TensorFlowTransformer parent, DataViewSchema inputSchema) :
_parent = parent;
_inputColIndices = new int[_parent.Inputs.Length];
_isInputVector = new bool[_parent.Inputs.Length];
_fullySpecifiedShapes = new TensorShape[_parent.Inputs.Length];
_fullySpecifiedShapes = new Shape[_parent.Inputs.Length];
for (int i = 0; i < _parent.Inputs.Length; i++)
{
if (!inputSchema.TryGetColumnIndex(_parent.Inputs[i], out _inputColIndices[i]))
@@ -570,19 +570,19 @@ public Mapper(TensorFlowTransformer parent, DataViewSchema inputSchema) :
{
vecType = (VectorDataViewType)type;
var colTypeDims = vecType.Dimensions.Select(dim => (int)dim).ToArray();
_fullySpecifiedShapes[i] = new TensorShape(colTypeDims);
_fullySpecifiedShapes[i] = new Shape(colTypeDims);
}
else
// for primitive type use default TensorShape
_fullySpecifiedShapes[i] = new TensorShape();
_fullySpecifiedShapes[i] = new Shape(Array.Empty<long>());
}
else
{
vecType = (VectorDataViewType)type;
var colTypeDims = vecType.Dimensions.Select(dim => (int)dim).ToArray();
// If the column is one dimension we make sure that the total size of the TF shape matches.
// Compute the total size of the known dimensions of the shape.
int valCount = 1;
long valCount = 1;
int numOfUnkDim = 0;
foreach (var s in shape)
{
@@ -592,7 +592,7 @@ public Mapper(TensorFlowTransformer parent, DataViewSchema inputSchema) :
numOfUnkDim++;
}
// The column length should be divisible by this, so that the other dimensions can be integral.
int typeValueCount = type.GetValueCount();
long typeValueCount = type.GetValueCount();
if (typeValueCount % valCount != 0)
throw Contracts.Except($"Input shape mismatch: Input '{_parent.Inputs[i]}' has shape {originalShape.ToString()}, but input data is of length {typeValueCount}.");

@@ -616,22 +616,22 @@ public Mapper(TensorFlowTransformer parent, DataViewSchema inputSchema) :
throw Contracts.Except($"Input shape mismatch: Input '{_parent.Inputs[i]}' has shape {originalShape.ToString()}, but input data is of length {typeValueCount}.");

// Fill in the unknown dimensions.
var l = new int[originalShapeNdim];
var l = new long[originalShapeNdim];
for (int ishape = 0; ishape < originalShapeNdim; ishape++)
l[ishape] = originalShapeDims[ishape] == -1 ? (int)d : originalShapeDims[ishape];
_fullySpecifiedShapes[i] = new TensorShape(l);
l[ishape] = originalShapeDims[ishape] == -1 ? (long)d : originalShapeDims[ishape];
_fullySpecifiedShapes[i] = new Shape(l);
}

if (_parent._addBatchDimensionInput)
{
// ndim of default TensorShape is -1, make originDim to 0 in this case.
// after addBatchDimension, input column will be changed: type -> type[]
var originDim = _fullySpecifiedShapes[i].ndim < 0 ? 0 : _fullySpecifiedShapes[i].ndim;
var l = new int[originDim + 1];
var l = new long[originDim + 1];
l[0] = 1;
for (int ishape = 1; ishape < l.Length; ishape++)
l[ishape] = _fullySpecifiedShapes[i].dims[ishape - 1];
_fullySpecifiedShapes[i] = new TensorShape(l);
_fullySpecifiedShapes[i] = new Shape(l);
}
}

@@ -720,7 +720,7 @@ private Delegate MakeGetter<T>(DataViewRow input, int iinfo, ITensorValueGetter[
UpdateCacheIfNeeded(input.Position, srcTensorGetters, activeOutputColNames, outputCache);

var tensor = outputCache.Outputs[_parent.Outputs[iinfo]];
var tensorSize = tensor.TensorShape.dims.Where(x => x > 0).Aggregate((x, y) => x * y);
var tensorSize = tensor.shape.dims.Where(x => x > 0).Aggregate((x, y) => x * y);

var editor = VBufferEditor.Create(ref dst, (int)tensorSize);
FetchStringData(tensor, editor.Values);
@@ -735,7 +735,7 @@ private Delegate MakeGetter<T>(DataViewRow input, int iinfo, ITensorValueGetter[
UpdateCacheIfNeeded(input.Position, srcTensorGetters, activeOutputColNames, outputCache);

var tensor = outputCache.Outputs[_parent.Outputs[iinfo]];
var tensorSize = tensor.TensorShape.dims.Where(x => x > 0).Aggregate((x, y) => x * y);
var tensorSize = tensor.shape.dims.Where(x => x > 0).Aggregate((x, y) => x * y);

var editor = VBufferEditor.Create(ref dst, (int)tensorSize);

@@ -821,10 +821,10 @@ private class TensorValueGetter<T> : ITensorValueGetter
{
private readonly ValueGetter<T> _srcgetter;
private readonly T[] _bufferedData;
private readonly TensorShape _tfShape;
private readonly Shape _tfShape;
private int _position;

public TensorValueGetter(DataViewRow input, int colIndex, TensorShape tfShape)
public TensorValueGetter(DataViewRow input, int colIndex, Shape tfShape)
{
_srcgetter = input.GetGetter<T>(input.Schema[colIndex]);
_tfShape = tfShape;
@@ -864,15 +864,15 @@ public Tensor GetBufferedBatchTensor()
private class TensorValueGetterVec<T> : ITensorValueGetter
{
private readonly ValueGetter<VBuffer<T>> _srcgetter;
private readonly TensorShape _tfShape;
private readonly Shape _tfShape;
private VBuffer<T> _vBuffer;
private T[] _denseData;
private T[] _bufferedData;
private int _position;
private readonly long[] _dims;
private readonly long _bufferedDataSize;

public TensorValueGetterVec(DataViewRow input, int colIndex, TensorShape tfShape)
public TensorValueGetterVec(DataViewRow input, int colIndex, Shape tfShape)
{
_srcgetter = input.GetGetter<VBuffer<T>>(input.Schema[colIndex]);
_tfShape = tfShape;
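Note: most of the churn in this file is mechanical, following TensorFlow.NET 0.70.x: NumSharp gives way to Tensorflow.NumPy, TensorShape becomes Shape, and shape dimensions are long rather than int. A small sketch of the shape-handling pattern the transform now uses, with illustrative dimension values:

using System.Linq;
using Tensorflow;

int[] colTypeDims = { 28, 28, 1 };   // dimensions taken from a VectorDataViewType
var tfShape = new Shape(colTypeDims.Select(d => (long)d).ToArray());

// Prepend a batch dimension of 1, as done when _addBatchDimensionInput is set.
var batched = new Shape(new long[] { 1 }.Concat(tfShape.dims).ToArray());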