internal static Tensor Permute(Tensor inTensor, int[] permutations) // TODO: unify Permute() arguments
{
    var padPermutationsToBarracudaRank = TensorShape.MaxRank - permutations.Length;
    if (padPermutationsToBarracudaRank > 0)
        permutations = permutations.Concat(Enumerable.Range(permutations.Length, padPermutationsToBarracudaRank)).ToArray();
    Debug.Assert(permutations.Length == TensorShape.MaxRank);

    // See: https://stackoverflow.com/a/32034565
    Profiler.BeginSample("ONNXTensor.Permute");
    var outTensor = new Tensor(ONNXLayout.Permute(inTensor.shape.ToArray(), permutations));
    Debug.Assert(outTensor.length == inTensor.length);

    // {0, 2, 3, 1} => {0, 3, 1, 2}
    // {2, 3, 1, 0} => {3, 2, 0, 1}
    //            => {find_index(0), find_index(1), find_index(2), find_index(3)}
    var reversePermute = new int[permutations.Length];
    for (var i = 0; i < permutations.Length; ++i)
        reversePermute[i] = Array.IndexOf(permutations, i);

    // outTensor strides
    var tempOutStrides = new int[TensorShape.MaxRank + 1];
    tempOutStrides[8] = 1;
    for (int i = 7; i >= 0; --i)
        tempOutStrides[i] = tempOutStrides[i + 1] * outTensor.shape[i];

    var outStride = new int[reversePermute.Length];
    for (var i = 0; i < reversePermute.Length; ++i)
        outStride[i] = tempOutStrides[reversePermute[i] + 1];

    // inTensor strides
    var inStrides = new int[TensorShape.MaxRank];
    inStrides[7] = 1;
    for (int i = 6; i >= 0; --i)
        inStrides[i] = inStrides[i + 1] * inTensor.shape[i + 1];

    for (var it = new TensorIterator(inTensor.shape); it.IsValid(); ++it)
    {
        outTensor[it.d0 * outStride[0] + it.d1 * outStride[1] + it.d2 * outStride[2] + it.d3 * outStride[3] +
                  it.d4 * outStride[4] + it.d5 * outStride[5] + it.d6 * outStride[6] + it.d7 * outStride[7]] =
            inTensor[it.d0 * inStrides[0] + it.d1 * inStrides[1] + it.d2 * inStrides[2] + it.d3 * inStrides[3] +
                     it.d4 * inStrides[4] + it.d5 * inStrides[5] + it.d6 * inStrides[6] + it.d7 * inStrides[7]];
    }

    Profiler.EndSample();
    return outTensor;
}
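// Illustrative sketch (not part of the importer): how a short ONNX permutation is used here.
// A rank-4 permutation such as {0, 2, 3, 1} (NCHW -> NHWC) is first padded with identity axes up to
// TensorShape.MaxRank, then applied via the precomputed strides above. Assuming a constant tensor
// already wrapped in a Barracuda Tensor named `weights` with an NCHW-shaped TensorShape:
//
//     var nhwcWeights = Permute(weights, new[] { 0, 2, 3, 1 });     // move channels last
//     var nchwAgain   = Permute(nhwcWeights, new[] { 0, 3, 1, 2 }); // inverse permutation restores the original layout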
public void AddVariable(string nodeId, long[] onnxShape, string onnxLayout)
{
    var onnxRank = onnxShape.Length;
    var permutations = ONNXLayout.AxisPermutationsForMappingONNXLayoutToBarracuda(onnxRank, onnxLayout);
    var barracudaChannelIndex = permutations.Length - 1;
    var onnxChannelIndex = permutations[barracudaChannelIndex];
    var channels = (onnxLayout != "?" && onnxChannelIndex >= 0) ? (int)onnxShape[onnxChannelIndex] : -1;

    var layout = VariableTensor.Layout.Unknown;
    if (onnxLayout == "NCHW")
        layout = VariableTensor.Layout.NCHW;
    else if (onnxLayout == "NHWC")
        layout = VariableTensor.Layout.NHWC;

    variables[nodeId] = new VariableTensor {
        features = channels,
        rank = onnxRank,
        layout = layout,
        productOfShape = null
    };
}
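// Illustrative sketch (not part of the importer): how the channel count is derived above.
// The last entry of the axis permutation is assumed to be the ONNX axis that lands in Barracuda's
// channels slot, so for a rank-4 "NCHW" input of shape {1, 3, 224, 224} the ONNX channel axis would
// be 1 and `features` would become 3:
//
//     // hypothetical call on a hypothetical instance `importerTensors`, mirroring the code above
//     importerTensors.AddVariable("input_0", new long[] { 1, 3, 224, 224 }, "NCHW"); // features == 3, rank == 4, layout == NCHW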
public ONNXTensor Permute(int[] permutations)
{
    // transpose both data & shape
    var transposedData = Permute(m_Data, permutations);
    var transposedShape = ONNXLayout.Permute(m_Shape, permutations);
    return new ONNXTensor(transposedData, transposedShape);
}
// slow version - kept just for performance comparison and validation
internal static Tensor PermuteSlow(Tensor readTensor, int[] permutations) // TODO: unify Permute() arguments
{
    var padPermutationsToBarracudaRank = 8 - permutations.Length;
    if (padPermutationsToBarracudaRank > 0)
        permutations = permutations.Concat(Enumerable.Range(permutations.Length, padPermutationsToBarracudaRank)).ToArray();
    Assert.IsTrue(permutations.Length == 8);

    var outputTensor = new Tensor(ONNXLayout.Permute(readTensor.shape.ToArray(), permutations));
    Assert.IsTrue(outputTensor.length == readTensor.length);

    var inShape = readTensor.shape.ToArray();
    for (var s = 0; s < inShape[0]; ++s)
        for (var n = 0; n < inShape[1]; ++n)
            for (var i0 = 0; i0 < inShape[2]; ++i0)
                for (var i1 = 0; i1 < inShape[3]; ++i1)
                    for (var i2 = 0; i2 < inShape[4]; ++i2)
                        for (var h = 0; h < inShape[5]; ++h)
                            for (var w = 0; w < inShape[6]; ++w)
                                for (var c = 0; c < inShape[7]; ++c)
                                {
                                    // prepend with 0 to handle "new axis" -1 value in permutations
                                    var it = new int[] { 0, s, n, i0, i1, i2, h, w, c };
                                    var oS  = it[permutations[0] + 1];
                                    var oN  = it[permutations[1] + 1];
                                    var oI0 = it[permutations[2] + 1];
                                    var oI1 = it[permutations[3] + 1];
                                    var oI2 = it[permutations[4] + 1];
                                    var oH  = it[permutations[5] + 1];
                                    var oW  = it[permutations[6] + 1];
                                    var oC  = it[permutations[7] + 1];
                                    outputTensor[oS, oN, oI0, oI1, oI2, oH, oW, oC] = readTensor[s, n, i0, i1, i2, h, w, c];
                                }

    return outputTensor;
}
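// Illustrative sketch (not part of the importer): PermuteSlow exists to cross-check the strided
// Permute above. A hypothetical validation helper could compare the two element by element:
//
//     internal static void ValidatePermute(Tensor t, int[] permutations)
//     {
//         var fast = Permute(t, permutations);
//         var slow = PermuteSlow(t, permutations);
//         Debug.Assert(fast.length == slow.length);
//         for (var i = 0; i < fast.length; ++i)
//             Debug.Assert(fast[i] == slow[i]);
//     }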
public ONNXTensor Reshape(int[] onnxShape)
{
    var symbolicShape = ONNXLayout.ConvertSymbolicShapeToBarracuda(onnxShape, "?");
    var reshapedData = m_Data.Reshape(symbolicShape);
    for (var i = 0; i < onnxShape.Length; ++i)
    {
        if (onnxShape[i] < 0)
            onnxShape[i] = reshapedData.shape[i];
        Assert.IsTrue(onnxShape[i] == reshapedData.shape[i]);
    }
    return new ONNXTensor(reshapedData, onnxShape);
}
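// Illustrative sketch (not part of the importer): the loop above back-fills negative ONNX shape
// entries from the reshaped Barracuda shape, mirroring ONNX Reshape's "infer this axis" semantics.
// Assuming a constant ONNXTensor `bias` holding 12 values, and assuming the symbolic-to-Barracuda
// conversion resolves the unknown axis:
//
//     var reshaped = bias.Reshape(new[] { 3, -1 }); // the -1 entry is replaced by 4 after the data reshape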
public Tensor ToBarracuda(string onnxLayout)
{
    Profiler.BeginSample("ONNXTensor.ToBarracuda");
    if (onnxLayout == "?")
        throw new OnnxLayerImportException("Unknown ONNX layout is not supported when converting a constant tensor to Barracuda.");

    Assert.IsTrue(m_Shape.All(v => v > 0));
    var permutations = ONNXLayout.AxisPermutationsForMappingONNXLayoutToBarracuda(rank, onnxLayout);
    Assert.IsTrue(rank <= permutations.Length);
    var outTensor = Permute(m_Data, permutations);

    Profiler.EndSample();
    return outTensor;
}
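// Illustrative sketch (not part of the importer): converting an imported constant to a Barracuda
// tensor. Assuming `onnxInitializer` is a TensorProto initializer whose values are stored in ONNX
// NCHW order:
//
//     var constant = new ONNXTensor(onnxInitializer);
//     Tensor asBarracuda = constant.ToBarracuda("NCHW"); // permuted from NCHW into Barracuda's channels-last layout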
public ONNXTensor(TensorProto onnxTensor)
{
    // read shape
    var onnxShape = onnxTensor.Dims.Select(v => v < int.MinValue ? int.MinValue : v > int.MaxValue ? int.MaxValue : (int)v).ToArray();
    if (onnxShape.Any(s => s == 0))
    {
        // empty tensor, no data
        m_Shape = onnxShape;
        m_Data = null;
    }
    else
    {
        // read data
        var shape = ONNXLayout.ConvertShapeToBarracuda(onnxShape, onnxLayout: "?");
        float[] data;
        if ((onnxTensor.RawData != null) && (!onnxTensor.RawData.IsEmpty))
        {
            var byteArray = new byte[onnxTensor.RawData.Length];
            onnxTensor.RawData.CopyTo(byteArray, 0);

            // Double
            if (onnxTensor.DataType == (int)TensorProto.Types.DataType.Double)
            {
                var typedData = new double[shape.length];
                Assert.IsTrue((sizeof(double) * shape.length) == onnxTensor.RawData.Length);
                Buffer.BlockCopy(byteArray, 0, typedData, 0, byteArray.Length);
                data = typedData.Select(v => v < int.MinValue ? (float)int.MinValue : v > int.MaxValue ? (float)int.MaxValue : (float)v).ToArray();
            }
            // Float32
            else if (onnxTensor.DataType == (int)TensorProto.Types.DataType.Float)
            {
                data = new float[shape.length];
                Assert.IsTrue((sizeof(float) * shape.length) == onnxTensor.RawData.Length);
                Buffer.BlockCopy(byteArray, 0, data, 0, byteArray.Length);
            }
            // Float16
            else if (onnxTensor.DataType == (int)TensorProto.Types.DataType.Float16)
            {
                var typedData = new UInt16[shape.length];
                Assert.IsTrue((sizeof(UInt16) * shape.length) == onnxTensor.RawData.Length);
                Buffer.BlockCopy(byteArray, 0, typedData, 0, byteArray.Length);
                data = typedData.Select(v => HalfHelper.HalfToSingle(v)).ToArray();
            }
            // Int8
            else if (onnxTensor.DataType == (int)TensorProto.Types.DataType.Int8)
            {
                var typedData = new sbyte[shape.length];
                Assert.IsTrue((sizeof(sbyte) * shape.length) == onnxTensor.RawData.Length);
                Buffer.BlockCopy(byteArray, 0, typedData, 0, byteArray.Length);
                data = typedData.Select(v => (float)v).ToArray();
            }
            // Int16
            else if (onnxTensor.DataType == (int)TensorProto.Types.DataType.Int16)
            {
                var typedData = new short[shape.length];
                Assert.IsTrue((sizeof(short) * shape.length) == onnxTensor.RawData.Length);
                Buffer.BlockCopy(byteArray, 0, typedData, 0, byteArray.Length);
                data = typedData.Select(v => (float)v).ToArray();
            }
            // Int32
            else if (onnxTensor.DataType == (int)TensorProto.Types.DataType.Int32)
            {
                var typedData = new int[shape.length];
                Assert.IsTrue((sizeof(int) * shape.length) == onnxTensor.RawData.Length);
                Buffer.BlockCopy(byteArray, 0, typedData, 0, byteArray.Length);
                data = typedData.Select(v => (float)v).ToArray();
            }
            // Int64
            else if (onnxTensor.DataType == (int)TensorProto.Types.DataType.Int64)
            {
                var typedData = new long[shape.length];
                Assert.IsTrue((sizeof(long) * shape.length) == onnxTensor.RawData.Length);
                Buffer.BlockCopy(byteArray, 0, typedData, 0, byteArray.Length);
                data = typedData.Select(v => v < int.MinValue ? (float)int.MinValue : v > int.MaxValue ? (float)int.MaxValue : (float)v).ToArray();
            }
            // UInt8
            else if (onnxTensor.DataType == (int)TensorProto.Types.DataType.Uint8)
            {
                var typedData = new byte[shape.length];
                Assert.IsTrue((sizeof(byte) * shape.length) == onnxTensor.RawData.Length);
                Buffer.BlockCopy(byteArray, 0, typedData, 0, byteArray.Length);
                data = typedData.Select(v => (float)v).ToArray();
            }
            // UInt16
            else if (onnxTensor.DataType == (int)TensorProto.Types.DataType.Uint16)
            {
                var typedData = new ushort[shape.length];
                Assert.IsTrue((sizeof(ushort) * shape.length) == onnxTensor.RawData.Length);
                Buffer.BlockCopy(byteArray, 0, typedData, 0, byteArray.Length);
                data = typedData.Select(v => (float)v).ToArray();
            }
            // UInt32
            else if (onnxTensor.DataType == (int)TensorProto.Types.DataType.Uint32)
            {
                var typedData = new uint[shape.length];
                Assert.IsTrue((sizeof(uint) * shape.length) == onnxTensor.RawData.Length);
                Buffer.BlockCopy(byteArray, 0, typedData, 0, byteArray.Length);
                data = typedData.Select(v => (float)v).ToArray();
            }
            // UInt64
            else if (onnxTensor.DataType == (int)TensorProto.Types.DataType.Uint64)
            {
                var typedData = new ulong[shape.length];
                Assert.IsTrue((sizeof(ulong) * shape.length) == onnxTensor.RawData.Length);
                Buffer.BlockCopy(byteArray, 0, typedData, 0, byteArray.Length);
                data = typedData.Select(v => v > uint.MaxValue ? (float)uint.MaxValue : (float)v).ToArray();
            }
            // Bool
            else if (onnxTensor.DataType == (int)TensorProto.Types.DataType.Bool)
            {
                var typedData = new bool[shape.length];
                Assert.IsTrue((sizeof(bool) * shape.length) == onnxTensor.RawData.Length);
                Buffer.BlockCopy(byteArray, 0, typedData, 0, byteArray.Length);
                data = typedData.Select(v => v ? 1.0f : 0.0f).ToArray();
            }
            else
            {
                throw new OnnxLayerImportException($"Tensor data type {(TensorProto.Types.DataType)onnxTensor.DataType} is not supported.");
            }
        }
        // Float32
        else if ((onnxTensor.FloatData != null) && (onnxTensor.FloatData.Count != 0))
        {
            Assert.IsTrue(shape.length == onnxTensor.FloatData.Count);
            data = new float[shape.length];
            onnxTensor.FloatData.CopyTo(data, 0);
        }
        // Int32
        else if ((onnxTensor.Int32Data != null) && (onnxTensor.Int32Data.Count != 0))
        {
            Assert.IsTrue(shape.length == onnxTensor.Int32Data.Count);
            data = onnxTensor.Int32Data.Select(v => (float)v).ToArray();
        }
        // Int64
        else if ((onnxTensor.Int64Data != null) && (onnxTensor.Int64Data.Count != 0))
        {
            Assert.IsTrue(shape.length == onnxTensor.Int64Data.Count);
            data = onnxTensor.Int64Data.Select(v => v < int.MinValue ? (float)int.MinValue : v > int.MaxValue ? (float)int.MaxValue : (float)v).ToArray();
        }
        else
        {
            throw new OnnxLayerImportException("Could not read tensor data for constant tensor.");
        }

        m_Data = new Tensor(shape, new SharedArrayTensorData(data));
        m_Shape = onnxShape;
    }
}