Example #1
        /// <summary>
        /// Propagates the inputs forward through this layer and, recursively,
        /// through the rest of the network.
        /// </summary>
        private Vector<double> FeedForward(Vector<double> inputs)
        {
            this.inputs = inputs;

            // Weighted sum of the inputs plus the bias terms.
            Vector<double> z = (inputs * Weights) + Biases;

            // The activation function turns the weighted sum into this layer's outputs.
            outputs = OutputActivation.Transform(z);

            // Recurse into the next layer, or return the network output from the last one.
            return (nextLayer != null) ? nextLayer.FeedForward(outputs) : outputs;
        }
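
For context, the same forward pass can be reproduced standalone. A minimal sketch, assuming MathNet.Numerics for the Vector<double>/Matrix<double> types and ReLU as a stand-in for OutputActivation.Transform (all names and values here are illustrative, not from the original source):

        using System;
        using MathNet.Numerics.LinearAlgebra;

        // One layer: z = inputs * Weights + Biases, then a = activation(z).
        Matrix<double> weights = Matrix<double>.Build.Random(3, 2);  // (nInputs x nOutputs)
        Vector<double> biases  = Vector<double>.Build.Dense(2, 0.1);
        Vector<double> inputs  = Vector<double>.Build.DenseOfArray(new[] { 0.5, -0.1, 0.9 });

        Vector<double> z = inputs * weights + biases;      // weighted sum per output node
        Vector<double> a = z.Map(v => Math.Max(0.0, v));   // ReLU in place of OutputActivation

        Console.WriteLine(a);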
Example #2
        /// <summary>
        /// Calculates the gradient for the current layer based on the gradients and input weights of the next layer
        /// in the neural network.
        /// </summary>
        /// <param name="delC_delA">Derivative of cost w.r.t. the hidden layer output</param>
        /// <remarks>
        /// Gradients are a measure of how far off, and in what direction (positive or negative),
        /// the current layer's output values are.
        /// </remarks>
        private void CalculateGradients(Vector<double> delC_delA)
        {
            // Node deltas dC/dZ: the element-wise product of dA/dZ and dC/dA.
            Vector<double> delA_delZ  = OutputActivation.Derivative(outputs);
            Vector<double> nodeDeltas = delA_delZ.PointwiseMultiply(delC_delA);

            WeightGradients = CalculateWeightGradients(nodeDeltas);
            BiasGradients   = CalculateBiasGradients(nodeDeltas);

            // The first hidden layer has no earlier hidden layer to propagate error back to.
            if (!IsFirstHiddenLayer)
            {
                PreviousLayerActivationGradients = CalculatePreviousLayerActivationGradients(nodeDeltas);
            }
        }
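
The three helper methods referenced above are not shown on this page. A plausible sketch of their bodies, assuming the standard backpropagation formulas and the same MathNet.Numerics types (these implementations are assumptions, not the original source; they would sit inside the same layer class):

        private Matrix<double> CalculateWeightGradients(Vector<double> nodeDeltas)
        {
            // dC/dW = outer product of this layer's inputs with the node deltas,
            // yielding one gradient per weight (same shape as Weights).
            return inputs.OuterProduct(nodeDeltas);
        }

        private Vector<double> CalculateBiasGradients(Vector<double> nodeDeltas)
        {
            // dC/db = the node deltas themselves, since each bias feeds z directly.
            return nodeDeltas;
        }

        private Vector<double> CalculatePreviousLayerActivationGradients(Vector<double> nodeDeltas)
        {
            // dC/dA(prev) = Weights * deltas; this becomes the delC_delA argument
            // of the previous layer's CalculateGradients call.
            return Weights * nodeDeltas;
        }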
Example #3

    protected override string? PropertyValidation(PropertyInfo pi)
    {
        if (pi.Name == nameof(SaveValidationProgressEvery))
        {
            if (SaveValidationProgressEvery % SaveProgressEvery != 0)
            {
                return PredictorMessage._0ShouldBeDivisibleBy12.NiceToString(pi.NiceName(), ReflectionTools.GetPropertyInfo(() => SaveProgressEvery).NiceName(), SaveProgressEvery);
            }
        }

        if (pi.Name == nameof(OutputActivation))
        {
            if (OutputActivation == NeuralNetworkActivation.ReLU || OutputActivation == NeuralNetworkActivation.Sigmoid)
            {
                var p      = this.GetParentEntity<PredictorEntity>();
                var errors = p.MainQuery.Columns.Where(a => a.Usage == PredictorColumnUsage.Output && a.Encoding.Is(DefaultColumnEncodings.NormalizeZScore)).Select(a => a.Token).ToList();
                errors.AddRange(p.SubQueries.SelectMany(sq => sq.Columns).Where(a => a.Usage == PredictorSubQueryColumnUsage.Output && a.Encoding.Is(DefaultColumnEncodings.NormalizeZScore)).Select(a => a.Token).ToList());

                if (errors.Any())
                {
                    return PredictorMessage._0CanNotBe1Because2Use3.NiceToString(pi.NiceName(), OutputActivation.NiceToString(), errors.CommaAnd(), DefaultColumnEncodings.NormalizeZScore.NiceToString());
                }
            }
        }

        string? Validate(NeuralNetworkEvalFunction function)
        {
            bool lossIsClassification = function == NeuralNetworkEvalFunction.sigmoid_cross_entropy_with_logits || function == NeuralNetworkEvalFunction.ClassificationError;
            bool typeIsClassification = this.PredictionType == PredictionType.Classification || this.PredictionType == PredictionType.MultiClassification;

            if (lossIsClassification != typeIsClassification)
            {
                return PredictorMessage._0IsNotCompatibleWith12.NiceToString(function.NiceToString(), this.NicePropertyName(a => a.PredictionType), this.PredictionType.NiceToString());
            }

            return null;
        }

        if (pi.Name == nameof(LossFunction))
        {
            return Validate(LossFunction);
        }

        if (pi.Name == nameof(EvalErrorFunction))
        {
            return Validate(EvalErrorFunction);
        }

        return base.PropertyValidation(pi);
    }
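
As a concrete reading of the first check: with SaveProgressEvery = 5, a SaveValidationProgressEvery of 10 or 15 passes, while 12 fails (12 % 5 == 2) and returns the divisibility message.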