Upgrade TF.Net version from 0.10.10 to 0.11.3 #4205
Changes from 8 commits
File: DnnRetrainTransformer.cs (filename inferred from the class in the diff)
@@ -235,7 +235,8 @@ private void CheckTrainingParameters(DnnRetrainEstimator.Options options)
 inputTensor.InputType(index);
 var tfInputShape = ((Tensor)inputTensor).TensorShape;
 
-if (isInputVector && (tfInputShape == null || (tfInputShape.NDim == 0)))
+var numInputDims = tfInputShape.ndim;
+if (isInputVector && (tfInputShape == null || (numInputDims == 0)))
 {
     var vecType = (VectorDataViewType)type;
     var colTypeDims = new int[vecType.Dimensions.Length + 1];

Review comment: It seems tfInputShape can be null. We should set numInputDims to -1 if tfInputShape is null, otherwise assign numInputDims = tfInputShape.ndim; #Closed
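A minimal sketch of the reviewer's suggestion above (not code from the PR; it keeps the existing condition and only adds a null guard before reading ndim):

    // Hedged sketch of the suggested fix: avoid dereferencing a null shape.
    var numInputDims = tfInputShape == null ? -1 : tfInputShape.ndim;
    if (isInputVector && (tfInputShape == null || (numInputDims == 0)))
    {
        // ... build tfInputShape from the column's dimensions, as in the hunk above ...
    }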
@@ -245,13 +246,14 @@ private void CheckTrainingParameters(DnnRetrainEstimator.Options options)
 
     tfInputShape = new TensorShape(colTypeDims);
 }
-if (tfInputShape.NDim != -1)
+if (numInputDims != -1)
 {
-    var newShape = new int[tfInputShape.NDim];
-    newShape[0] = tfInputShape[0] == 0 || tfInputShape[0] == -1 ? batchSize : tfInputShape[0];
+    var newShape = new int[numInputDims];
+    var dims = tfInputShape.dims;
+    newShape[0] = dims[0] == 0 || dims[0] == -1 ? batchSize : dims[0];
 
-    for (int j = 1; j < tfInputShape.NDim; j++)
-        newShape[j] = tfInputShape[j];
+    for (int j = 1; j < numInputDims; j++)
+        newShape[j] = dims[j];
     tfInputShape = new TensorShape(newShape);
 }
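For context, the hunk above substitutes the batch size for an unknown (0 or -1) leading dimension and copies the rest of the shape. A standalone sketch with hypothetical values, using only plain arrays:

    // dims as they might come from TensorShape.dims; -1 marks the unknown batch dimension.
    int batchSize = 64;                  // hypothetical batch size
    int[] dims = { -1, 28, 28 };         // hypothetical input shape

    var newShape = new int[dims.Length];
    newShape[0] = dims[0] == 0 || dims[0] == -1 ? batchSize : dims[0];
    for (int j = 1; j < dims.Length; j++)
        newShape[j] = dims[j];
    // newShape is now { 64, 28, 28 }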
@@ -382,10 +384,9 @@ private void TrainCore(DnnRetrainEstimator.Options options, IDataView input, IDa
     runner.AddInput(inputs[i], srcTensorGetters[i].GetBufferedBatchTensor());
 
 Tensor[] tensor = runner.Run();
-var buffer = tensor[0].Data();
-loss = tensor.Length > 0 && tensor[0] != IntPtr.Zero ? (float)tensor[0].Data<float>()[0] : 0.0f;
-metric = tensor.Length > 1 && tensor[1] != IntPtr.Zero ? (float)tensor[1].Data<float>()[0] : 0.0f;
-var b = tensor.Length > 2 && tensor[2] != IntPtr.Zero ? (float[])tensor[2].Data<float>() : null;
+loss = tensor.Length > 0 && tensor[0] != IntPtr.Zero ? (float)tensor[0].ToArray<float>()[0] : 0.0f;
+metric = tensor.Length > 1 && tensor[1] != IntPtr.Zero ? (float)tensor[1].ToArray<float>()[0] : 0.0f;
+var b = tensor.Length > 2 && tensor[2] != IntPtr.Zero ? (float[])tensor[2].ToArray<float>() : null;
 
 return (loss, metric);
 }

Review comment: I don't see …
@@ -639,7 +640,7 @@ internal static (TF_DataType[] tfOutputTypes, DataViewType[] outputTypes, (Opera
 // i.e. the first dimension (if unknown) is assumed to be batch dimension.
 // If there are other dimension that are unknown the transformer will return a variable length vector.
 // This is the work around in absence of reshape transformer.
-int[] dims = shape.NDim > 0 ? shape.Dimensions.Skip(shape[0] == -1 ? 1 : 0).ToArray() : new[] { 0 };
+int[] dims = shape.ndim > 0 ? shape.dims.Skip(shape.dims[0] == -1 ? 1 : 0).ToArray() : new[] { 0 };
 for (int j = 0; j < dims.Length; j++)
     dims[j] = dims[j] == -1 ? 0 : dims[j];
 if (dims == null || dims.Length == 0)
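A worked sketch of the fallback described in the comments above (hypothetical shapes; requires System.Linq): an output shape of [-1, 10] drops the unknown batch dimension, while [5, -1] maps the unknown dimension to 0 and is later surfaced as a variable-length vector.

    // [-1, 10] -> skip the unknown batch dim -> dims = { 10 }
    // [ 5, -1] -> keep the rank, map -1 to 0 -> dims = { 5, 0 }
    int[] shapeDims = { -1, 10 };        // hypothetical model output shape
    int[] dims = shapeDims.Length > 0
        ? shapeDims.Skip(shapeDims[0] == -1 ? 1 : 0).ToArray()
        : new[] { 0 };
    for (int j = 0; j < dims.Length; j++)
        dims[j] = dims[j] == -1 ? 0 : dims[j];
    // dims == { 10 }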
@@ -780,7 +781,7 @@ public Mapper(DnnRetrainTransformer parent, DataViewSchema inputSchema) :
 if (type.GetItemType() != expectedType)
     throw Host.ExceptSchemaMismatch(nameof(inputSchema), "input", _parent._inputs[i], expectedType.ToString(), type.ToString());
 var originalShape = _parent._tfInputShapes[i];
-var shape = originalShape.Dimensions;
+var shape = originalShape.dims;
 
 var colTypeDims = vecType.Dimensions.Select(dim => (int)dim).ToArray();
 if (shape == null || (shape.Length == 0))
@@ -811,18 +812,20 @@ public Mapper(DnnRetrainTransformer parent, DataViewSchema inputSchema) :
     throw Contracts.Except($"Input shape mismatch: Input '{_parent._inputs[i]}' has shape {originalShape.ToString()}, but input data is of length {typeValueCount}.");
 
 // Fill in the unknown dimensions.
-var l = new int[originalShape.NDim];
-for (int ishape = 0; ishape < originalShape.NDim; ishape++)
-    l[ishape] = originalShape[ishape] == -1 ? (int)d : originalShape[ishape];
+var originalShapeDims = originalShape.dims;
+var originalShapeNdim = originalShape.ndim;
+var l = new int[originalShapeNdim];
+for (int ishape = 0; ishape < originalShapeNdim; ishape++)
+    l[ishape] = originalShapeDims[ishape] == -1 ? (int)d : originalShapeDims[ishape];
 _fullySpecifiedShapes[i] = new TensorShape(l);
 }
 
 if (_parent._addBatchDimensionInput)
 {
-    var l = new int[_fullySpecifiedShapes[i].NDim + 1];
+    var l = new int[_fullySpecifiedShapes[i].ndim + 1];
     l[0] = 1;
     for (int ishape = 1; ishape < l.Length; ishape++)
-        l[ishape] = _fullySpecifiedShapes[i][ishape - 1];
+        l[ishape] = _fullySpecifiedShapes[i].dims[ishape - 1];
     _fullySpecifiedShapes[i] = new TensorShape(l);
 }
 }

Review comment: Should this cast also be done outside the loop? #Resolved
Reply: This is not an addition in this PR. If required, I can do this change.
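The cast the reviewer refers to is the (int)d inside the loop; hoisting it would look roughly like this (a sketch, not a change made in this commit):

    // Hoist the cast out of the loop, per the review note above.
    var dValue = (int)d;
    for (int ishape = 0; ishape < originalShapeNdim; ishape++)
        l[ishape] = originalShapeDims[ishape] == -1 ? dValue : originalShapeDims[ishape];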
@@ -857,7 +860,7 @@ protected override Delegate MakeGetter(DataViewRow input, int iinfo, Func<int, b
     return Utils.MarshalInvoke(MakeGetter<int>, type, input, iinfo, srcTensorGetters, activeOutputColNames, outputCache);
 }
 
-private Delegate MakeGetter<T>(DataViewRow input, int iinfo, ITensorValueGetter[] srcTensorGetters, string[] activeOutputColNames, OutputCache outputCache)
+private Delegate MakeGetter<T>(DataViewRow input, int iinfo, ITensorValueGetter[] srcTensorGetters, string[] activeOutputColNames, OutputCache outputCache) where T : unmanaged
 {
     Host.AssertValue(input);

Review comment: Can this ever be used with a string T?
Reply: I have made a separate function for when T is string to avoid this failure.
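For context on the new where T : unmanaged constraint (my reading of the review reply, not text from the PR): unmanaged admits blittable value types such as float, int, and long but not string, which is why string-valued output columns get their own getter that goes through DnnUtils.FetchStringData in the hunks below.

    // Sketch: a generic helper constrained the same way; string does not satisfy
    // 'unmanaged', so a string tensor has to be read through a separate code path.
    static T FirstElement<T>(Tensor tensor) where T : unmanaged
        => tensor.ToArray<T>()[0];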
@@ -868,7 +871,7 @@ private Delegate MakeGetter<T>(DataViewRow input, int iinfo, ITensorValueGetter[
     UpdateCacheIfNeeded(input.Position, srcTensorGetters, activeOutputColNames, outputCache);
 
     var tensor = outputCache.Outputs[_parent._outputs[iinfo]];
-    dst = tensor.Data<T>()[0];
+    dst = tensor.ToArray<T>()[0];
 };
 return valuegetter;
 }

@@ -881,7 +884,7 @@ private Delegate MakeGetter<T>(DataViewRow input, int iinfo, ITensorValueGetter[
     UpdateCacheIfNeeded(input.Position, srcTensorGetters, activeOutputColNames, outputCache);
 
     var tensor = outputCache.Outputs[_parent._outputs[iinfo]];
-    var tensorSize = tensor.TensorShape.Dimensions.Where(x => x > 0).Aggregate((x, y) => x * y);
+    var tensorSize = tensor.TensorShape.dims.Where(x => x > 0).Aggregate((x, y) => x * y);
 
     var editor = VBufferEditor.Create(ref dst, (int)tensorSize);
     DnnUtils.FetchStringData(tensor, editor.Values);

@@ -896,11 +899,11 @@ private Delegate MakeGetter<T>(DataViewRow input, int iinfo, ITensorValueGetter[
     UpdateCacheIfNeeded(input.Position, srcTensorGetters, activeOutputColNames, outputCache);
 
     var tensor = outputCache.Outputs[_parent._outputs[iinfo]];
-    var tensorSize = tensor.TensorShape.Dimensions.Where(x => x > 0).Aggregate((x, y) => x * y);
+    var tensorSize = tensor.TensorShape.dims.Where(x => x > 0).Aggregate((x, y) => x * y);
 
     var editor = VBufferEditor.Create(ref dst, (int)tensorSize);
 
-    DnnUtils.FetchData<T>(tensor.Data<T>(), editor.Values);
+    DnnUtils.FetchData<T>(tensor.ToArray<T>(), editor.Values);
     dst = editor.Commit();
 };
 return valuegetter;
@@ -912,6 +915,7 @@ private void UpdateCacheIfNeeded(long position, ITensorValueGetter[] srcTensorGe
 {
     if (outputCache.Position != position)
     {
+        _parent._session.graph.as_default();
         Runner runner = new Runner(_parent._session);
 
         // Feed the inputs.
@@ -972,12 +976,12 @@ public TensorValueGetter(DataViewRow input, int colIndex, TensorShape tfShape, b
 _tfShape = tfShape;
 long size = 0;
 _position = 0;
-if (tfShape.Dimensions.Length != 0)
+if (tfShape.dims.Length != 0)
 {
     size = 1;
-    foreach (var dim in tfShape.Dimensions)
+    foreach (var dim in tfShape.dims)
         size *= dim;
-    _dims = _tfShape.Dimensions.Select(x => (long)x).ToArray();
+    _dims = _tfShape.dims.Select(x => (long)x).ToArray();
 }
 if (keyType)
     _bufferedDataLong = new long[size];
@@ -993,13 +997,13 @@ public Tensor GetTensor()
 if (_keyType)
 {
     var tensor = new Tensor(new[] { Convert.ToInt64(scalar) - 1 });
-    tensor.SetShape(_tfShape);
+    tensor.set_shape(_tfShape);
     return tensor;
 }
 else
 {
     var tensor = new Tensor(new[] { scalar });
-    tensor.SetShape(_tfShape);
+    tensor.set_shape(_tfShape);
     return tensor;
 }
 }
@@ -1095,16 +1099,16 @@ public TensorValueGetterVec(DataViewRow input, int colIndex, TensorShape tfShape
 
 long size = 0;
 _position = 0;
-if (tfShape.Dimensions.Length != 0)
+if (tfShape.dims.Length != 0)
 {
     size = 1;
-    foreach (var dim in tfShape.Dimensions)
+    foreach (var dim in tfShape.dims)
         size *= dim;
 }
 _bufferedData = new T[size];
 _bufferedDataSize = size;
-if (_tfShape.Dimensions != null)
-    _dims = _tfShape.Dimensions.Select(x => (long)x).ToArray();
+if (_tfShape.dims != null)
+    _dims = _tfShape.dims.Select(x => (long)x).ToArray();
 }
 
 public Tensor GetTensor()
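Taken together, the changes across this PR track the TF.NET API renames between 0.10.10 and 0.11.3; a before/after summary of the renames exactly as they appear in these hunks:

    // TF.NET 0.10.10 (old)              TF.NET 0.11.3 (as used in this PR)
    // shape.NDim                        shape.ndim
    // shape.Dimensions                  shape.dims
    // shape[i]                          shape.dims[i]
    // tensor.SetShape(shape)            tensor.set_shape(shape)
    // tensor.Data<T>()                  tensor.ToArray<T>()
    // using static Tensorflow.Python;   using static Tensorflow.Binding;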
File: DnnUtils.cs (filename inferred from the namespace and helpers in the diff)
@@ -11,7 +11,7 @@
 using Microsoft.ML.Data;
 using Microsoft.ML.Runtime;
 using Tensorflow;
-using static Tensorflow.Python;
+using static Tensorflow.Binding;
 
 namespace Microsoft.ML.Transforms.Dnn
 {
@@ -92,11 +92,10 @@ internal static Session LoadTFSession(IExceptionContext ectx, byte[] modelBytes,
 
 internal static Graph LoadMetaGraph(string path)
 {
-    return tf_with(tf.Graph().as_default(), graph =>
-    {
-        tf.train.import_meta_graph(path);
-        return graph;
-    });
+    var graph = new Graph();
+    graph = graph.as_default();
+    tf.train.import_meta_graph(path);
+    return graph;
 }
 
 internal static Session LoadTFSessionByModelFilePath(IExceptionContext ectx, string modelFile, bool metaGraph = false)

Review comment: It seems like there is no connection between `graph` and `tf.train.import_meta_graph(path)`.
Reply: This creates a new graph and the 'as_default()' sets this as the tf default graph (https://github.com/SciSharp/TensorFlow.NET/blob/41432600c8263cf972b13930c31479ffb412fa65/src/TensorFlowNET.Core/Graphs/Graph.cs#L134). The 'graph' is where the meta graph is loaded and returned.
Review comment: remove new lines. #Resolved
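A brief usage sketch of the reworked helper (the .meta path and operation name are hypothetical; OperationByName is used the same way elsewhere in this PR):

    // Load a TensorFlow meta graph with the new LoadMetaGraph and look up an op by name.
    Graph graph = DnnUtils.LoadMetaGraph(@"C:\models\retrain.meta");   // hypothetical path
    var inputOp = graph.OperationByName("input");                      // hypothetical op name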
File: ImageClassificationTransformer.cs (filename inferred from the class in the diff)
@@ -20,7 +20,7 @@
 using static Microsoft.ML.Data.TextLoader;
 using static Microsoft.ML.Transforms.Dnn.DnnUtils;
 using static Microsoft.ML.Transforms.ImageClassificationEstimator;
-using static Tensorflow.Python;
+using static Tensorflow.Binding;
 using Architecture = Microsoft.ML.Transforms.ImageClassificationEstimator.Architecture;
 
 [assembly: LoadableClass(ImageClassificationTransformer.Summary, typeof(IDataTransform), typeof(ImageClassificationTransformer),
@@ -170,7 +170,7 @@ private void CheckTrainingParameters(ImageClassificationEstimator.Options option
 Host.CheckNonWhiteSpace(options.LabelColumn, nameof(options.LabelColumn));
 Host.CheckNonWhiteSpace(options.TensorFlowLabel, nameof(options.TensorFlowLabel));
 
-if (_session.graph.OperationByName(options.TensorFlowLabel) == null)
+if (_session.graph.OperationByName(_labelTensor.name.Split(':')[0]) == null)
     throw Host.ExceptParam(nameof(options.TensorFlowLabel), $"'{options.TensorFlowLabel}' does not exist in the model");
 }

Review comment: If this is the name of the operation you are searching for in the graph, then the error message should mention the correct name. In this case, the options argument is not needed here. #Resolved
Reply: The options argument is not used anymore. The _labelTensor has the correct name that the error message would need.
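For context on the Split(':')[0] in the new check: a TensorFlow tensor name is the producing operation's name plus an output index, while Graph.OperationByName expects just the operation name. A tiny sketch with a hypothetical name:

    // "some_scope/labels:0" refers to output 0 of the operation "some_scope/labels";
    // stripping the ":0" suffix yields the name that OperationByName expects.
    string tensorName = "some_scope/labels:0";    // hypothetical; stands in for _labelTensor.name
    string opName = tensorName.Split(':')[0];     // "some_scope/labels"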
@@ -243,7 +243,7 @@ private void CacheFeaturizedImagesToDisk(IDataView input, string labelColumnName
 var imageTensor = imageProcessor.ProcessImage(imagePathStr);
 runner.AddInput(imageTensor, 0);
 var featurizedImage = runner.Run()[0]; // Reuse memory?
-writer.WriteLine(label - 1 + "," + string.Join(",", featurizedImage.Data<float>()));
+writer.WriteLine(label - 1 + "," + string.Join(",", featurizedImage.ToArray<float>()));
 featurizedImage.Dispose();
 imageTensor.Dispose();
 metrics.Bottleneck.Index++;
@@ -378,8 +378,8 @@ private void TrainAndEvaluateClassificationLayer(string trainBottleneckFilePath,
     .AddInput(new Tensor(labelBatchPtr, labelTensorShape, TF_DataType.TF_INT64, labelBatchSizeInBytes), 1)
     .Run();
 
-metrics.Train.Accuracy += outputTensors[0].Data<float>()[0];
-metrics.Train.CrossEntropy += outputTensors[1].Data<float>()[0];
+metrics.Train.Accuracy += outputTensors[0].ToArray<float>()[0];
+metrics.Train.CrossEntropy += outputTensors[1].ToArray<float>()[0];
 
 outputTensors[0].Dispose();
 outputTensors[1].Dispose();

@@ -429,7 +429,7 @@ private void TrainAndEvaluateClassificationLayer(string trainBottleneckFilePath,
     .AddInput(new Tensor(labelBatchPtr, labelTensorShape, TF_DataType.TF_INT64, labelBatchSizeInBytes), 1)
     .Run();
 
-metrics.Train.Accuracy += outputTensors[0].Data<float>()[0];
+metrics.Train.Accuracy += outputTensors[0].ToArray<float>()[0];
 metrics.Train.BatchProcessedCount += 1;
 batchIndex = 0;
@@ -458,17 +458,13 @@ private void TrainAndEvaluateClassificationLayer(string trainBottleneckFilePath,
 Tensor evaluationStep = null;
 Tensor prediction = null;
 Tensor bottleneckTensor = evalGraph.OperationByName(_bottleneckOperationName);
 
-tf_with(evalGraph.as_default(), graph =>
-{
-    var (_, _, groundTruthInput, finalTensor) = AddFinalRetrainOps(classCount, options.LabelColumn,
+evalGraph.as_default();
+var (_, _, groundTruthInput, finalTensor) = AddFinalRetrainOps(classCount, options.LabelColumn,
     options.ScoreColumnName, options.LearningRate, bottleneckTensor, false);
 
 tf.train.Saver().restore(evalSess, _checkpointPath);
 (evaluationStep, prediction) = AddEvaluationStep(finalTensor, groundTruthInput);
 (_jpegData, _resizedImage) = AddJpegDecoding(299, 299, 3);
-});
 
 return (evalSess, _labelTensor, evaluationStep, prediction);
 }

Review comment: Does this mutate evalGraph?
Reply: In line 842 in this file there is also a call to …
Reply: This sets the TensorFlow default graph to the evalGraph. This does not mutate the evalGraph.
@@ -530,14 +526,15 @@ private void VariableSummaries(RefVariable var)
 private (Operation, Tensor, Tensor, Tensor) AddFinalRetrainOps(int classCount, string labelColumn,
     string scoreColumnName, float learningRate, Tensor bottleneckTensor, bool isTraining)
 {
-    var (batch_size, bottleneck_tensor_size) = (bottleneckTensor.TensorShape.Dimensions[0], bottleneckTensor.TensorShape.Dimensions[1]);
+    var bottleneckTensorDims = bottleneckTensor.TensorShape.dims;
+    var (batch_size, bottleneck_tensor_size) = (bottleneckTensorDims[0], bottleneckTensorDims[1]);
     tf_with(tf.name_scope("input"), scope =>
     {
         if (isTraining)
         {
             _bottleneckInput = tf.placeholder_with_default(
                 bottleneckTensor,
-                shape: bottleneckTensor.TensorShape.Dimensions,
+                shape: bottleneckTensorDims,
                 name: "BottleneckInputPlaceholder");
         }

@@ -559,7 +556,8 @@ private void VariableSummaries(RefVariable var)
 RefVariable layerBiases = null;
 tf_with(tf.name_scope("biases"), delegate
 {
-    layerBiases = tf.Variable(tf.zeros(classCount), name: "final_biases");
+    TensorShape shape = new TensorShape(classCount);
+    layerBiases = tf.Variable(tf.zeros(shape), name: "final_biases");
     VariableSummaries(layerBiases);
 });
@@ -599,11 +597,9 @@ private void AddTransferLearningLayer(string labelColumn,
     string scoreColumnName, float learningRate, int classCount)
 {
     _bottleneckTensor = Graph.OperationByName(_bottleneckOperationName);
-    tf_with(Graph.as_default(), delegate
-    {
-        (_trainStep, _crossEntropy, _labelTensor, _softMaxTensor) =
+    (_trainStep, _crossEntropy, _labelTensor, _softMaxTensor) =
         AddFinalRetrainOps(classCount, labelColumn, scoreColumnName, learningRate, _bottleneckTensor, true);
-    });
 }
 
 // Factory method for SignatureLoadDataTransform.
@@ -757,7 +753,7 @@ private protected override void SaveModel(ModelSaveContext ctx)
 var buffer = _session.graph.ToGraphDef(status);
 ctx.SaveBinaryStream("TFModel", w =>
 {
-    w.WriteByteArray(buffer.Data);
+    w.WriteByteArray(buffer.MemoryBlock.ToArray());
 });
 status.Check(true);
 }

Review comment: Can you please do a diff between the graph with your change and master and see if there are any changes? I know the graph size increased and I want to know why and by how much. #Resolved
Reply: The sizes of the files are exactly the same between master and my change, but doing a diff shows that the files differ. Are the trained values expected to be exactly the same?
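One way to answer the reviewer's size/diff question would be to dump the serialized GraphDef bytes from both builds and compare them with a binary diff tool (a sketch; only ToGraphDef, MemoryBlock.ToArray(), and the status object appear in this diff, while the Status constructor and output path are assumptions):

    // Dump the serialized GraphDef so two builds can be compared byte-for-byte.
    var status = new Status();                               // assumed TF.NET status type
    var buffer = _session.graph.ToGraphDef(status);
    System.IO.File.WriteAllBytes(@"C:\tmp\graphdef_new.pb",  // hypothetical output path
        buffer.MemoryBlock.ToArray());
    status.Check(true);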
@@ -830,8 +826,8 @@ public void UpdateCacheIfNeeded()
 _imagePathGetter(ref _imagePath);
 var processedTensor = _imageProcessor.ProcessImage(_imagePath.ToString());
 var outputTensor = _runner.AddInput(processedTensor, 0).Run();
-ClassProbabilities = outputTensor[0].Data<float>();
-PredictedLabel = (UInt32)outputTensor[1].Data<long>()[0];
+ClassProbabilities = outputTensor[0].ToArray<float>();
+PredictedLabel = (UInt32)outputTensor[1].ToArray<long>()[0];
 outputTensor[0].Dispose();
 outputTensor[1].Dispose();
 processedTensor.Dispose();
@@ -843,6 +839,7 @@ public void UpdateCacheIfNeeded()
 protected override Delegate MakeGetter(DataViewRow input, int iinfo, Func<int, bool> activeOutput, out Action disposer)
 {
     disposer = null;
+    _parent._session.graph.as_default();
     Host.AssertValue(input);
     var cache = new OutputCache(input, _parent);

Review comment: not needed. #Resolved
Reply: Required for the Microsoft.ML.Scenarios.TensorFlowScenariosTests.TensorFlowImageClassification test.
Review comment: is this needed? #Resolved
Reply: Yes, test Microsoft.ML.Scenarios.TensorFlowScenariosTests.TensorFlowImageClassification is failing without this.
Review comment: Nit: tabs in empty line. #Resolved