private Layer Conv(string name, Layer.Type convType, object input, Int32[] stride, Int32[] pad, Int32[] outputPad, Tensor kernel, Tensor bias)
        {
            // Builds a convolution layer of the requested type; the constant
            // kernel and bias tensors are packed back-to-back into `weights`,
            // each described by its own DataSet entry.
            var layer = new Layer(name, convType);

            layer.pad    = pad;
            layer.stride = stride;
            layer.pool   = outputPad; // output padding travels in the `pool` field
            layer.inputs = new[] { ResolveInput(input) };

            int kernelLength = kernel.shape.length;
            int biasLength   = bias.shape.length;

            var kernelData = new Layer.DataSet();
            kernelData.name            = $"{name}/K";
            kernelData.shape           = kernel.shape;
            kernelData.itemSizeInBytes = 4; // float32
            kernelData.length          = kernelLength;
            kernelData.offset          = 0;

            var biasData = new Layer.DataSet();
            biasData.name            = $"{name}/B";
            biasData.shape           = bias.shape;
            biasData.itemSizeInBytes = 4; // float32
            biasData.length          = biasLength;
            biasData.offset          = kernelLength; // bias is stored right after the kernel

            layer.datasets = new[] { kernelData, biasData };
            layer.weights  = new float[kernelLength + biasLength];
            kernel.ToReadOnlyArray().CopyTo(layer.weights, 0);
            bias.ToReadOnlyArray().CopyTo(layer.weights, kernelLength);

            m_Model.layers.Add(layer);

            return layer;
        }
        /// <summary>
        /// Apply a densely connected layer (aka general matrix multiplication or GEMM).
        /// Bias should be a tensor with (batch == input.shape[H] * input.shape[W] * input.shape[C]) and only one other dimension of size > 1.
        /// Weight should be a tensor with (batch == 1); the constant weight and bias
        /// data are packed back-to-back into the layer's `weights` buffer.
        ///
        /// Output shape is [input.shape[B], 1, 1, Weight.shape[H]*Weight.shape[W]*Weight.shape[C]]
        /// </summary>
        public Layer Dense(string name, object input, Tensor weight, Tensor bias)
        {
            var layer = new Layer(name, Layer.Type.Dense);

            layer.inputs = new[] { ResolveInput(input) };

            int weightLength = weight.shape.length;
            int biasLength   = bias.shape.length;

            var weightData = new Layer.DataSet();
            weightData.name            = $"{name}/W";
            weightData.shape           = weight.shape;
            weightData.itemSizeInBytes = 4; // float32
            weightData.length          = weightLength;
            weightData.offset          = 0;

            var biasData = new Layer.DataSet();
            biasData.name            = $"{name}/B";
            biasData.shape           = bias.shape;
            biasData.itemSizeInBytes = 4; // float32
            biasData.length          = biasLength;
            biasData.offset          = weightLength; // bias is stored right after the weights

            layer.datasets = new[] { weightData, biasData };
            layer.weights  = new float[weightLength + biasLength];
            weight.ToReadOnlyArray().CopyTo(layer.weights, 0);
            bias.ToReadOnlyArray().CopyTo(layer.weights, weightLength);

            m_Model.layers.Add(layer);

            return layer;
        }
        /// <summary>
        /// Carries out instance normalization as described in the paper https://arxiv.org/abs/1607.08022
        /// y = scale * (x - mean) / sqrt(variance + epsilon) + bias, where mean and variance are computed per instance per channel.
        /// Scale and bias should be tensors of shape [1,1,1, input.shape[C]]
        ///
        /// Output shape is same as input.
        /// </summary>
        public Layer Normalization(string name, object input, Tensor scale, Tensor bias, float epsilon = 1e-5f)
        {
            var layer = new Layer(name, Layer.Type.Normalization);

            layer.inputs = new[] { ResolveInput(input) };
            layer.beta   = epsilon; // epsilon is carried in the `beta` field

            int scaleLength = scale.shape.length;
            int biasLength  = bias.shape.length;

            var scaleData = new Layer.DataSet();
            scaleData.name            = $"{name}/S";
            scaleData.shape           = scale.shape;
            scaleData.itemSizeInBytes = 4; // float32
            scaleData.length          = scaleLength;
            scaleData.offset          = 0;

            var biasData = new Layer.DataSet();
            biasData.name            = $"{name}/B";
            biasData.shape           = bias.shape;
            biasData.itemSizeInBytes = 4; // float32
            biasData.length          = biasLength;
            biasData.offset          = scaleLength; // bias is stored right after scale

            layer.datasets = new[] { scaleData, biasData };
            layer.weights  = new float[scaleLength + biasLength];
            scale.ToReadOnlyArray().CopyTo(layer.weights, 0);
            bias.ToReadOnlyArray().CopyTo(layer.weights, scaleLength);

            m_Model.layers.Add(layer);

            return layer;
        }
// ----- Example #4 -----
        /// <summary>
        /// Converts Tensor to DataSet: copies the shape and data of `X` into the
        /// data set at `index`, overwriting the corresponding region of `weights`.
        /// </summary>
        /// <param name="X">input `Tensor`</param>
        /// <param name="index">dataset index; must be within [0, datasets.Length)</param>
        public void ApplyTensorToDataSet(Tensor X, int index)
        {
            // Guard both bounds: the original check (`index < datasets.Length`)
            // let a negative index through to the array access below.
            Assert.IsTrue(index >= 0 && index < datasets.Length);
            var ds = datasets[index];

            ds.shape = X.shape;
            // NOTE(review): nothing verifies the new shape's length fits the region
            // reserved for this data set (ds.length) — confirm callers guarantee it.
            Array.Copy(X.ToReadOnlyArray(), 0, weights, ds.offset, ds.shape.length);
            // Store the updated entry back (copy-out/copy-back pattern suggests
            // DataSet is a value type).
            datasets[index] = ds;
        }
        /// <summary>
        /// Allow to load a tensor from constants.
        /// The tensor's data is copied into the layer's `weights` buffer and
        /// described by a single DataSet entry.
        /// </summary>
        public Layer Const(string name, Tensor tensor, int insertionIndex = -1)
        {
            var layer = new Layer(name, Layer.Type.Load);

            var data = new Layer.DataSet();
            data.name            = name;
            data.shape           = tensor.shape;
            data.itemSizeInBytes = 4; // float32
            data.length          = tensor.shape.length;
            data.offset          = 0;

            layer.datasets = new[] { data };
            layer.weights  = new float[tensor.shape.length];
            tensor.ToReadOnlyArray().CopyTo(layer.weights, 0);

            // An out-of-range insertion index means "append at the end".
            bool append = insertionIndex < 0 || insertionIndex >= m_Model.layers.Count;
            if (append)
                m_Model.layers.Add(layer);
            else
                m_Model.layers.Insert(insertionIndex, layer);

            return layer;
        }
// ----- Example #6 -----
 /// <summary>
 /// Return Tensor data as int array (slow operation), this will create a blocking read operation.
 /// Values outside the int range are saturated to int.MinValue / int.MaxValue.
 /// </summary>
 /// <param name="x">Tensor</param>
 /// <returns>Tensor data as int array</returns>
 static public int[] AsInts(this Tensor x)
 {
     var floats = x.ToReadOnlyArray();
     var result = new int[floats.Length];
     for (var i = 0; i < floats.Length; ++i)
     {
         var v = floats[i];
         if (v <= (float)int.MinValue)
             result[i] = int.MinValue;
         else if (v >= (float)int.MaxValue)
             result[i] = int.MaxValue;
         else
             result[i] = (int)v;
     }
     return result;
 }
// ----- Example #7 -----
 /// <summary>
 /// Return Tensor data as float array, this will create a blocking read operation
 /// </summary>
 /// <param name="x">Tensor</param>
 /// <returns>Tensor data as float array</returns>
 static public float[] AsFloats(this Tensor x) => x.ToReadOnlyArray();
// ----- Example #8 -----
 /// <summary>
 /// Return Tensor data as long array (slow operation), this will create a blocking read operation.
 /// Values outside the long range are saturated to long.MinValue / long.MaxValue —
 /// consistent with the saturating AsInts — instead of relying on the unchecked
 /// float-to-long cast, whose out-of-range result is unspecified by the C# spec.
 /// </summary>
 /// <param name="x">Tensor</param>
 /// <returns>Tensor data as long array</returns>
 static public long[] AsLongs(this Tensor x)
 {
     return(Array.ConvertAll(x.ToReadOnlyArray(),
         v => v <= (float)long.MinValue ? long.MinValue
            : v >= (float)long.MaxValue ? long.MaxValue
            : (long)v));
 }
// ----- Example #9 -----
 /// <summary>
 /// Return Tensor data as int array (slow operation), this will create a blocking read operation.
 /// Values outside the int range are saturated to int.MinValue / int.MaxValue
 /// instead of relying on the unchecked float-to-int cast, whose out-of-range
 /// result is unspecified by the C# spec.
 /// </summary>
 /// <param name="x">Tensor</param>
 /// <returns>Tensor data as int array</returns>
 static public int[] AsInts(this Tensor x)
 {
     return(Array.ConvertAll(x.ToReadOnlyArray(),
         v => v <= (float)int.MinValue ? int.MinValue
            : v >= (float)int.MaxValue ? int.MaxValue
            : (int)v));
 }