/// <summary>
/// Copies the contents of <paramref name="tensor"/> into <paramref name="array"/>,
/// growing the destination buffer to the tensor's element count when needed.
/// </summary>
/// <typeparam name="T">Unmanaged element type of the destination buffer.</typeparam>
/// <param name="tensor">Source tensor whose data is copied out.</param>
/// <param name="array">Destination buffer, passed by reference so it can be reallocated.</param>
public static void ToArray<T>(this Tensor tensor, ref T[] array) where T : unmanaged
{
    // Guarantee the destination can hold every element before copying.
    int elementCount = (int)tensor.size;
    Utils.EnsureSize(ref array, elementCount, elementCount, false);

    // T[] converts implicitly to Span<T> covering the whole array.
    Span<T> destination = array;
    CopyTo(tensor, destination);
}
/// <summary>
/// Creates the value getter delegate for output column <paramref name="iinfo"/>,
/// dispatching to the generic <c>MakeGetter&lt;T&gt;</c> with the column's runtime element type.
/// </summary>
/// <param name="input">The row to read input values from.</param>
/// <param name="iinfo">Index of the output column to build a getter for.</param>
/// <param name="activeOutput">Predicate telling which output columns are active.</param>
/// <param name="disposer">Always set to null; no per-getter cleanup is required.</param>
/// <returns>The getter delegate produced by the marshalled generic call.</returns>
protected override Delegate MakeGetter(DataViewRow input, int iinfo, Func<int, bool> activeOutput, out Action disposer)
{
    disposer = null;
    Host.AssertValue(input);

    // Names of the output columns the caller marked active.
    var activeOutputColNames = _parent.Outputs.Where((x, i) => activeOutput(i)).ToArray();

    // The .NET element type derived from the TF output type must match the declared schema type.
    var rawType = Tf2MlNetType(_parent.TFOutputTypes[iinfo]).RawType;
    Host.Assert(rawType == _parent.OutputTypes[iinfo].GetItemType().RawType);

    // Getters that materialize the input tensors for this row.
    var srcTensorGetters = GetTensorValueGetters(input, _inputColIndices, _isInputVector, _parent.TFInputTypes, _fullySpecifiedShapes);

    // Per-getter cache handed through to the generic implementation.
    var rowCache = new OutputCache();

    // Instantiate MakeGetter<T> with rawType as the generic argument and invoke it.
    return Utils.MarshalInvoke(MakeGetter<int>, rawType, input, iinfo, srcTensorGetters, activeOutputColNames, rowCache);
}
/// <summary>
/// Builds a <see cref="DataViewSchema"/> exposing the outputs of a TensorFlow graph as columns,
/// optionally restricted to operators whose <c>OpType</c> equals <paramref name="opType"/>.
/// Operators with no outputs, or whose output type has no ML.NET equivalent, are skipped.
/// </summary>
/// <param name="ectx">Exception context. NOTE(review): not referenced in this body — confirm callers rely on it.</param>
/// <param name="graph">The TensorFlow graph to inspect.</param>
/// <param name="opType">When non-null, only operators of this type are included.</param>
/// <returns>A schema with one column per supported operator output, annotated with
/// the operator type and (when inputs exist) its upstream operator names.</returns>
internal static DataViewSchema GetModelSchema(IExceptionContext ectx, Graph graph, string opType = null)
{
    var schemaBuilder = new DataViewSchema.Builder();
    foreach (Operation op in graph)
    {
        // Optional filter: keep only operators of the requested type.
        if (opType != null && opType != op.OpType)
        {
            continue;
        }

        var tfType = op.OutputType(0);
        // Determine element type in Tensorflow tensor. For example, a vector of floats may get NumberType.R4 here.
        var mlType = Tf2MlNetTypeOrNull(tfType);

        // If the type is not supported in ML.NET then we cannot represent it as a column in an Schema.
        // We also cannot output it with a TensorFlowTransform, so we skip it.
        // Furthermore, operators which have NumOutputs <= 0 needs to be filtered.
        // The 'GetTensorShape' method crashes TensorFlow runtime
        // (https://github.com/dotnet/machinelearning/issues/2156) when the operator has no outputs.
        if (mlType == null || op.NumOutputs <= 0)
        {
            continue;
        }

        // There can be at most two metadata fields.
        //  1. The first field always presents. Its value is this operator's type. For example,
        //     if an output is produced by an "Softmax" operator, the value of this field should be "Softmax".
        //  2. The second field stores operators whose outputs are consumed by this operator. In other words,
        //     these values are names of some upstream operators which should be evaluated before executing
        //     the current operator. It's possible that one operator doesn't need any input, so this field
        //     can be missing.
        var metadataBuilder = new DataViewSchema.Annotations.Builder();
        // Create the first metadata field.
        metadataBuilder.Add(TensorflowOperatorTypeKind, TextDataViewType.Instance,
            (ref ReadOnlyMemory<char> value) => value = op.OpType.AsMemory());

        if (op.NumInputs > 0)
        {
            // Put upstream operators' names to an array (type: VBuffer) of string (type: ReadOnlyMemory<char>).
            VBuffer<ReadOnlyMemory<char>> upstreamOperatorNames = default;
            var bufferEditor = VBufferEditor.Create(ref upstreamOperatorNames, op.NumInputs);
            for (int i = 0; i < op.NumInputs; ++i)
            {
                bufferEditor.Values[i] = op.inputs[i].op.name.AsMemory();
            }
            upstreamOperatorNames = bufferEditor.Commit(); // Used in metadata's getter.

            // Create the second metadata field. The committed buffer is captured by the
            // getter closure below, so it must not be reused after this point.
            metadataBuilder.Add(TensorflowUpstreamOperatorsKind,
                new VectorDataViewType(TextDataViewType.Instance, op.NumInputs),
                (ref VBuffer<ReadOnlyMemory<char>> value) => { upstreamOperatorNames.CopyTo(ref value); });
        }

        // Construct the final ML.NET type of a Tensorflow variable.
        var tensorShape = op.output.TensorShape.dims;
        if (tensorShape == null)
        {
            // primitive column type
            schemaBuilder.AddColumn(op.name, mlType, metadataBuilder.ToAnnotations());
        }
        else
        {
            // vector column type: default to a variable-size vector.
            DataViewType columnType = new VectorDataViewType(mlType);
            // Upgrade to a known-size vector only when every dimension past the first is
            // positive and the shape is not a lone unknown dimension; when the leading
            // (batch) dimension is unknown (<= 0), it is dropped from the declared shape.
            if (!(Utils.Size(tensorShape) == 1 && tensorShape[0] <= 0) &&
                (Utils.Size(tensorShape) > 0 && tensorShape.Skip(1).All(x => x > 0)))
            {
                columnType = new VectorDataViewType(mlType, tensorShape[0] > 0 ? tensorShape : tensorShape.Skip(1).ToArray());
            }

            schemaBuilder.AddColumn(op.name, columnType, metadataBuilder.ToAnnotations());
        }
    }
    return (schemaBuilder.ToSchema());
}