// Verifies that InputLayer.Backward is the identity mapping: the incoming
// delta gradient must pass through the input layer unchanged.
public void InputLayer_Backward()
{
    const int batchSize = 1;
    const int width = 28;
    const int height = 28;
    const int depth = 3;
    var random = new Random(232);
    var fanIn = width * height * depth;

    var sut = new InputLayer(height, width, depth);
    sut.Initialize(1, 1, 1, batchSize, Initialization.GlorotUniform, random);

    // Forward must run first so the layer has seen a batch of matching shape.
    var input = Matrix<float>.Build.Random(batchSize, fanIn, random.Next());
    sut.Forward(input);

    var delta = Matrix<float>.Build.Random(batchSize, fanIn, random.Next());
    var actual = sut.Backward(delta);

    // Expected output is the delta itself, untouched.
    MatrixAsserts.AreEqual(delta, actual);
}
/// <summary>
/// Runs one full forward pass: the input layer first, then every remaining
/// layer in registration order.
/// </summary>
public void ForwardPropagation()
{
    InputLayer.Forward();

    foreach (var layer in Layers)
    {
        layer.Forward();
    }
}
/// <summary>
/// Feeds <paramref name="input"/> into the network and propagates it forward
/// through every layer in order.
/// </summary>
/// <param name="input">Raw activation values for the input layer.</param>
public void Forward(double[] input)
{
    _inputLayer.SetInput(input);
    _inputLayer.Forward();

    foreach (var hiddenLayer in _layers)
    {
        hiddenLayer.Forward();
    }
}
/// <summary>
/// Propagates the current sample forward through the whole network, then
/// copies the final layer's activations into this node's output buffer.
/// </summary>
public void ForwardPropagation()
{
    InputLayer.Forward();

    for (int i = 0; i < Layers.Count; i++)
    {
        Layers[i].Forward();
    }

    // Copy the end layer output to the node output.
    int outputCount = LastLayer.Output.Count;
    m_copyKernel.SetupExecution(outputCount);
    m_copyKernel.Run(LastLayer.Output.Ptr, 0, CurrentSampleOutputPtr, 0, outputCount);

    SamplesProcessed++;
}
// Runs one forward pass whose extent depends on the autoencoder mode:
//  - TRAINING / FORWARD_PASS: full network; final activations copied to the
//    node output buffer.
//  - FEATURE_ENCODING: encoder half only, up to and including the feature
//    (bottleneck) layer; bottleneck activations copied to the feature output.
//  - FEATURE_DECODING: feature input injected directly into the bottleneck
//    layer, then only the decoder half is run.
// NOTE(review): the copy kernel appears to move device buffers (Ptr-based
// arguments) — confirm these are CUDA device pointers before refactoring.
public void ForwardPropagation()
{
    switch (AutoencoderTask.NetworkMode)
    {
        case MyAutoencoderMode.TRAINING:
        case MyAutoencoderMode.FORWARD_PASS:
            InputLayer.Forward();
            foreach (MyAbstractFBLayer layer in Layers)
            {
                layer.Forward();
            }
            // Copy the end layer output to the node output
            m_copyKernel.SetupExecution(LastLayer.Output.Count);
            m_copyKernel.Run(LastLayer.Output.Ptr, 0, CurrentSampleOutputPtr, 0, LastLayer.Output.Count);
            SamplesProcessed++;
            break;

        case MyAutoencoderMode.FEATURE_ENCODING:
            InputLayer.Forward();
            // Inclusive upper bound: the feature layer itself is the last to run.
            for (int i = 0; i <= FeatureLayerPosition; i++)
            {
                Layers[i].Forward();
            }
            // Copy the featureLayer to feature output
            m_copyKernel.SetupExecution(FeatureLayer.Output.Count);
            m_copyKernel.Run(FeatureLayer.Output.Ptr, 0, CurrentSampleFeatureOutputPtr, 0, FeatureLayer.Output.Count);
            SamplesProcessed++;
            break;

        case MyAutoencoderMode.FEATURE_DECODING:
            // Copy the feature input to the feature layer
            m_copyKernel.SetupExecution(FeatureLayer.Output.Count);
            m_copyKernel.Run(CurrentSampleFeatureInputPtr, 0, FeatureLayer.Output.Ptr, 0, FeatureLayer.Output.Count);
            // Only the layers after the bottleneck run; the encoder is skipped.
            for (int i = FeatureLayerPosition + 1; i < Layers.Count; i++)
            {
                Layers[i].Forward();
            }
            // Copy the end layer output to the node output
            m_copyKernel.SetupExecution(LastLayer.Output.Count);
            m_copyKernel.Run(LastLayer.Output.Ptr, 0, CurrentSampleOutputPtr, 0, LastLayer.Output.Count);
            SamplesProcessed++;
            break;
    }
}
// The input layer must be a pass-through: Forward returns the very same
// tensor instance it receives, for every batch size tried.
public void ForwardBackwardTest1()
{
    Shape shape = new Shape(Shape.BWHC, -1, 20, 15, 10);
    InputLayer layer = new InputLayer(shape);

    for (int batch = 1; batch <= 3; batch++)
    {
        Tensor input = new Tensor(null, shape.Reshape(Axis.B, batch));
        input.Randomize();

        IList<Tensor> inputs = new[] { input };
        IList<Tensor> outputs = layer.Forward(null, inputs);

        Assert.IsTrue(inputs[0] == outputs[0]);
    }
}
// Passing two tensors to a single-input layer must raise an ArgumentException
// carrying the "invalid count" resource message.
// NOTE(review): the rethrow suggests an [ExpectedException] attribute outside
// this view makes the test fail when nothing is thrown — confirm.
public void ForwardTest2()
{
    Shape shape = new Shape(new[] { 1, 20, 15, 10 });
    InputLayer layer = new InputLayer(shape);
    try
    {
        Tensor tensor = new Tensor(null, shape);
        layer.Forward(null, new[] { tensor, tensor });
    }
    catch (ArgumentException e)
    {
        Assert.AreEqual(Properties.Resources.E_InvalidInputTensor_InvalidCount, e.Message);
        throw;
    }
}
// Passing two tensors to a single-input layer must raise an ArgumentException
// carrying the "invalid count" resource message.
// NOTE(review): the rethrow suggests an [ExpectedException] attribute outside
// this view makes the test fail when nothing is thrown — confirm.
public void ForwardTest1()
{
    Shape shape = new Shape(new[] { 1, 20, 15, 10 });
    InputLayer layer = new InputLayer(shape);
    try
    {
        Tensor x = new Tensor(null, shape);
        x.Randomize();
        IList<Tensor> xs = new[] { x, x };
        layer.Forward(null, xs);
    }
    catch (ArgumentException e)
    {
        // Compare against the resource string directly, as the sibling tests do.
        // ArgumentException.Message returns the string it was constructed with,
        // so the previous throw-away `new ArgumentException(...).Message` wrapper
        // added noise without changing the compared value.
        Assert.AreEqual(Properties.Resources.E_InvalidInputTensor_InvalidCount, e.Message);
        throw;
    }
}
// Passing a rank-3 tensor to a layer whose shape expects rank 4 must raise an
// ArgumentException carrying the formatted "invalid rank" resource message.
// NOTE(review): the rethrow suggests an [ExpectedException] attribute outside
// this view makes the test fail when nothing is thrown — confirm.
public void ForwardTest3()
{
    Shape shape = new Shape(Shape.BWHC, -1, 20, 15, 10);
    InputLayer layer = new InputLayer(shape);
    try
    {
        Tensor x = new Tensor(null, new[] { 1, 2, 3 });
        layer.Forward(null, new[] { x });
    }
    catch (ArgumentException e)
    {
        // Build the expected text once and compare it directly. The previous
        // code constructed a throw-away ArgumentException just to read back its
        // Message — ArgumentException.Message returns the constructor string,
        // so the wrapper changed nothing but obscured the expectation.
        string expected = string.Format(
            CultureInfo.InvariantCulture,
            Properties.Resources.E_InvalidInputTensor_InvalidRank,
            3,
            4);
        Assert.AreEqual(expected, e.Message);
        throw;
    }
}