public void Think_CausesEachNeuronInLayerToThink()
        {
            // Tolerance for floating-point comparison: the sigmoid computed by the
            // neuron may differ from the inline expected expression by a few ulps.
            const double Tolerance = 1e-12;

            Layer layer = new Layer(numberOfNeurons: 2);

            // Give each neuron one input and one output.
            layer.Neurons[0].Inputs.Add(new Synapse { Weight = 1, Value = 4 });
            layer.Neurons[1].Inputs.Add(new Synapse { Weight = 2, Value = 3 });
            layer.Neurons[0].Outputs.Add(new Synapse());
            layer.Neurons[1].Outputs.Add(new Synapse());

            // Reset the biases since we know they are randomized, and we want to disregard them.
            layer.Neurons[0].Bias = layer.Neurons[1].Bias = 0;

            // Execute the code to test.
            layer.Think();

            // Validate that each neuron sets the value for its output.
            // Expected output is sigmoid(sum), where sum = bias + Σ(weight × value).
            // First neuron: sum = 0 + 1 × 4 = 4, so expected = 1 / (1 + e^-4).
            Assert.AreEqual(
                1 / (1 + Math.Exp(-4)),
                layer.Neurons[0].Outputs[0].Value,
                Tolerance,
                "First neuron output");

            // Second neuron: sum = 0 + 2 × 3 = 6, so expected = 1 / (1 + e^-6).
            Assert.AreEqual(
                1 / (1 + Math.Exp(-6)),
                layer.Neurons[1].Outputs[0].Value,
                Tolerance,
                "Second neuron output");
        }