/// <summary>
/// Creates a layer whose bias values are supplied by the caller (e.g. when restoring a saved model).
/// </summary>
/// <param name="mathManager">Math backend used to allocate the layer's matrices.</param>
/// <param name="biasData">Initial bias values, one per neuron.</param>
/// <param name="neuronCount">Number of neurons in this layer.</param>
/// <param name="batchSize">Number of examples processed per forward pass (one column each).</param>
/// <param name="isInputLayer">True when this is the network's input layer.</param>
/// <param name="isOutputLayer">True when this is the network's output layer.</param>
public NeuralLayer(MathOperationManager mathManager, float[] biasData, int neuronCount, int batchSize, bool isInputLayer = false, bool isOutputLayer = false)
{
    this.IsInputLayer = isInputLayer;
    this.IsOutputLayer = isOutputLayer;

    // Activations and their error gradients are neuronCount x batchSize; bias is a column vector.
    this.Data = mathManager.CreateMatrix(neuronCount, batchSize);
    this.Bias = mathManager.CreateMatrix(biasData, neuronCount);
    this.ErrorGradient = mathManager.CreateMatrix(neuronCount, batchSize);
}
/// <summary>
/// Creates a layer with randomly initialized bias values (fresh, untrained network).
/// </summary>
/// <param name="mathManager">Math backend used to allocate the layer's matrices.</param>
/// <param name="neuronCount">Number of neurons in this layer.</param>
/// <param name="batchSize">Number of examples processed per forward pass (one column each).</param>
/// <param name="isInputLayer">True when this is the network's input layer.</param>
/// <param name="isOutputLayer">True when this is the network's output layer.</param>
public NeuralLayer(MathOperationManager mathManager, int neuronCount, int batchSize, bool isInputLayer = false, bool isOutputLayer = false)
{
    this.IsInputLayer = isInputLayer;
    this.IsOutputLayer = isOutputLayer;

    this.Data = mathManager.CreateMatrix(neuronCount, batchSize);

    // One random starting bias per neuron.
    float[] randomBias = ArrayUtilities.GetArrayWithRandomValues(neuronCount);
    this.Bias = mathManager.CreateMatrix(randomBias, neuronCount);

    this.ErrorGradient = mathManager.CreateMatrix(neuronCount, batchSize);
}
/// <summary>
/// Creates a zero-initialized row x column matrix (column defaults to 1, i.e. a column vector).
/// </summary>
/// <param name="mathManager">Math backend that owns this matrix's device-side resources.</param>
/// <param name="row">Number of rows.</param>
/// <param name="column">Number of columns; defaults to 1.</param>
public Matrix(MathOperationManager mathManager, int row, int column = 1)
{
    this.mathManager = mathManager;
    this.Row = row;
    this.Column = column;
    this.isDeviceDataInitialized = false;

    // Host buffer is freshly allocated (already all zeros); SetZero presumably also
    // resets backend/device-side state — TODO confirm against its implementation.
    this.HostData = new float[this.Row * this.Column];
    this.SetZero();
}
/// <summary>
/// Creates a row x column matrix initialized from the given flat array of values.
/// </summary>
/// <param name="mathManager">Math backend that owns this matrix's device-side resources.</param>
/// <param name="value">Flat source data; must contain exactly row * column elements.</param>
/// <param name="row">Number of rows.</param>
/// <param name="column">Number of columns; defaults to 1.</param>
/// <param name="skipMajorConversion">Passed through to SetValue; controls row/column-major conversion — see SetValue.</param>
/// <exception cref="ArgumentNullException">Thrown when <paramref name="value"/> is null.</exception>
/// <exception cref="ArgumentException">Thrown when the data length does not match row * column.</exception>
public Matrix(MathOperationManager mathManager, float[] value, int row, int column = 1, bool skipMajorConversion = true)
{
    // Explicit guard; the original LINQ Count() call would also have thrown
    // ArgumentNullException on null, so the observable exception type is unchanged.
    if (value == null)
    {
        throw new ArgumentNullException(nameof(value));
    }

    // Array.Length instead of LINQ Count(): idiomatic and avoids the extension-method call.
    if (value.Length != row * column)
    {
        throw new ArgumentException("Length of data != row*column");
    }

    this.mathManager = mathManager;
    this.Row = row;
    this.Column = column;
    this.isDeviceDataInitialized = false;
    this.HostData = new float[this.Row * this.Column];
    SetValue(value, skipMajorConversion);
}
/// <summary>
/// Computes the sigmoid cross-entropy loss and its derivative for a batch of pre-activation scores.
/// </summary>
/// <param name="mathManager">Math backend used to allocate temporaries and run the kernels.</param>
/// <param name="preSigmoidScores">Raw (pre-sigmoid) output scores; one column per example.</param>
/// <param name="trueLabels">Ground-truth labels; same shape as the scores.</param>
/// <param name="outputErrorDerivative">Output: derivative of the loss w.r.t. the pre-sigmoid scores.</param>
/// <param name="errorAvg">Output: total cross-entropy error divided by the number of columns (batch size).</param>
public static void CrossEntropyErrorAndDerivative(MathOperationManager mathManager, Matrix preSigmoidScores, Matrix trueLabels, Matrix outputErrorDerivative, ref float errorAvg)
{
    using (Matrix sigmoidScores = mathManager.CreateMatrix(preSigmoidScores.Row, preSigmoidScores.Column), outputError = mathManager.CreateMatrix(preSigmoidScores.Row, preSigmoidScores.Column))
    {
        // Calculate sigmoid
        mathManager.GlobalInstance.MatrixSigmoid(preSigmoidScores, sigmoidScores);

        // Get cross entropy error, then average it over the batch (columns).
        mathManager.GlobalInstance.MatrixCrossEntropyError(sigmoidScores, trueLabels, outputError);
        errorAvg = outputError.GetValue().Sum() / preSigmoidScores.Column;

        // Derivative of sigmoid cross-entropy w.r.t. pre-sigmoid scores:
        // sigmoidScores - trueLabels. (No regularization term is applied here,
        // contrary to what a stale earlier comment suggested.)
        mathManager.GlobalInstance.MatrixAddition(sigmoidScores, MatrixTransformation.None, 1, trueLabels, MatrixTransformation.None, -1, outputErrorDerivative);
    }
}
/// <summary>
/// Computes the Bellman (temporal-difference) loss and its derivative for a batch of DQN transitions.
/// </summary>
/// <param name="mathManager">Math backend used to allocate temporaries and run the kernels.</param>
/// <param name="predictedQValues">Q-values predicted by the online network; one column per transition.</param>
/// <param name="QHatValues">Q-values from the target ("Q-hat") network for the next states.</param>
/// <param name="chosenActionIndices">Index of the action actually taken in each transition.</param>
/// <param name="currentRewards">Immediate reward observed for each transition.</param>
/// <param name="errorAvg">Output: total Bellman error divided by the number of columns (batch size).</param>
/// <param name="errorDerivative">Output: derivative of the loss w.r.t. the predicted Q-values.</param>
/// <param name="discountFactor">Discount (gamma) applied to the max target Q-value.</param>
/// <param name="isLastEpisode">Per-transition terminal flag; presumably suppresses the bootstrap term for terminal states — confirm against the kernel.</param>
public static void BellmanLossAndDerivative(MathOperationManager mathManager, Matrix predictedQValues, Matrix QHatValues, Matrix chosenActionIndices, Matrix currentRewards, ref float errorAvg, Matrix errorDerivative, float discountFactor, Matrix isLastEpisode)
{
    using (Matrix maxQHatValues = mathManager.CreateMatrix(predictedQValues.Column), errorMatrix = mathManager.CreateMatrix(predictedQValues.Column))
    {
        // Calculate column-wise max of the QHat values (the bootstrap target max_a' QHat(s', a')).
        mathManager.GlobalInstance.ColumnWiseMax(QHatValues, maxQHatValues);

        // Calculate error & derivative in a single fused kernel.
        mathManager.GlobalInstance.MatrixBellmanErrorAndDerivative(predictedQValues, maxQHatValues, chosenActionIndices, currentRewards, errorMatrix, errorDerivative, discountFactor, isLastEpisode);

        // Average the per-transition errors over the batch.
        errorAvg = errorMatrix.GetValue().Sum() / predictedQValues.Column;
    }
}
/// <summary>
/// Builds a Deep Q-Network from the given configuration and initializes the target
/// ("Q-hat") network as a snapshot of the freshly created online network.
/// </summary>
/// <param name="mathManager">Math backend shared by the online and target networks.</param>
/// <param name="configuration">Network configuration; its loss function must be BellmanError.</param>
/// <exception cref="ArgumentException">Thrown when the configured loss function is not BellmanError.</exception>
public DQN(MathOperationManager mathManager, DQNNeuralNetworkConfiguration configuration)
{
    // DQN training is defined only in terms of the Bellman error loss.
    if (configuration.LossFunction != LossFunctionType.BellmanError)
    {
        throw new ArgumentException("DQN only supports Bellman error. Pls check the configuration passed in.");
    }

    base.CreateNeuralNetwork(mathManager, configuration);
    this.DQNConfiguration = configuration;
    this.gradientStepCount = 0;

    // Serialize the just-created online network and load it into QHat, so the
    // target network starts as an exact copy of the online network's weights.
    var nnStore = this.CreateNeuralNetworkStore();
    this.QHat = new NeuralNetwork(this.mathManager, nnStore);
}
/// <summary>
/// Trains a small demo DQN (5 inputs, hidden layers of 10 and 5, 12 outputs) on the GPU backend.
/// </summary>
/// <param name="trainData">Training batches.</param>
/// <param name="cvData">Cross-validation batches.</param>
private static void DQN(IEnumerable<BatchInputWrapper> trainData, IEnumerable<BatchInputWrapper> cvData)
{
    using (MathOperationManager mathManager = new MathOperationManager(MathType.GPU))
    {
        // Topology: 5 -> 10 -> 5 -> 12.
        var hiddenLayers = new List<int> { 10, 5 };

        var config = new DQNNeuralNetworkConfiguration(5, hiddenLayers, 12)
        {
            LossFunction = LossFunctionType.BellmanError,
            Epochs = 20,
            StepSize = 0.1f,
        };

        using (DQN nn = new DQN(mathManager, config))
        {
            nn.MiniBatchStochasticGradientDescent(trainData, cvData);
        }
    }
}
/// <summary>
/// Trains a small demo feed-forward network (784 inputs, two hidden layers of 100, 10 outputs)
/// on the GPU backend — dimensions match MNIST-style 28x28 inputs with 10 classes.
/// </summary>
/// <param name="trainData">Training batches.</param>
/// <param name="cvData">Cross-validation batches.</param>
private static void DNN(IEnumerable<BatchInputWrapper> trainData, IEnumerable<BatchInputWrapper> cvData)
{
    using (MathOperationManager mathManager = new MathOperationManager(MathType.GPU))
    {
        // Topology: 784 -> 100 -> 100 -> 10.
        var hiddenLayers = new List<int> { 100, 100 };

        var config = new NeuralNetworkConfiguration(784, hiddenLayers, 10)
        {
            Epochs = 100,
            StepSize = 1.5f,
        };
        //config.Activation = NeuronActivationType.ReLu;

        using (NeuralNetwork dnn = new NeuralNetwork(mathManager, config))
        {
            dnn.MiniBatchStochasticGradientDescent(trainData, cvData);
        }
    }
}
/// <summary>
/// Evaluates a batch of transitions by comparing the network's greedy action choice
/// against the action actually taken, accumulating reward sums and counts separately
/// for matching and non-matching predictions.
/// </summary>
/// <param name="mathManager">Math backend used to allocate temporaries and run the kernels.</param>
/// <param name="predictedQValues">Q-values predicted by the network; one column per transition.</param>
/// <param name="chosenActionIndices">Index of the action actually taken in each transition.</param>
/// <param name="currentRewards">Reward observed for each transition.</param>
/// <param name="matchPredictRewardSum">Output: sum of rewards where the greedy prediction matched the chosen action.</param>
/// <param name="matchPredictRewardCount">Output: number of transitions where prediction matched.</param>
/// <param name="nonMatchPredictRewardSum">Output: sum of rewards where the prediction did not match.</param>
/// <param name="nonMatchPredictRewardCount">Output: number of transitions where prediction did not match.</param>
public static void DqnStanfordEvaluation(MathOperationManager mathManager, Matrix predictedQValues, Matrix chosenActionIndices, Matrix currentRewards, ref float matchPredictRewardSum, ref int matchPredictRewardCount, ref float nonMatchPredictRewardSum, ref int nonMatchPredictRewardCount)
{
    // float.Epsilon marks "no reward written" slots in the output matrices.
    // NOTE(review): this is exact-float sentinel comparison — it silently miscounts
    // if a real reward ever equals float.Epsilon; confirm rewards can never take that value.
    float emptyIndicatorValue = float.Epsilon;
    using (Matrix predictedActionIndices = mathManager.CreateMatrix(predictedQValues.Column), matchPredictrewardMatrix = mathManager.CreateMatrix(Enumerable.Repeat<float>(emptyIndicatorValue, predictedQValues.Column).ToArray(), predictedQValues.Column), nonMatchPredictrewardMatrix = mathManager.CreateMatrix(Enumerable.Repeat<float>(emptyIndicatorValue, predictedQValues.Column).ToArray(), predictedQValues.Column))
    {
        // Greedy action per column: index of the max predicted Q-value.
        mathManager.GlobalInstance.ColumnWiseMaxIndex(predictedQValues, predictedActionIndices);

        // Kernel scatters each transition's reward into exactly one of the two matrices
        // depending on whether the predicted action matched the chosen one.
        mathManager.GlobalInstance.DqnStanfordEvaluation(predictedActionIndices, chosenActionIndices, currentRewards, matchPredictrewardMatrix, nonMatchPredictrewardMatrix);

        // Slots still holding the sentinel were never written; count only real rewards.
        // Sum() includes the sentinel slots, but each contributes only float.Epsilon.
        var matchPredictRewards = matchPredictrewardMatrix.GetValue();
        matchPredictRewardCount = matchPredictRewards.Count(r => r != emptyIndicatorValue);
        matchPredictRewardSum = matchPredictRewards.Sum();

        var nonMatchPredictRewards = nonMatchPredictrewardMatrix.GetValue();
        nonMatchPredictRewardCount = nonMatchPredictRewards.Count(r => r != emptyIndicatorValue);
        nonMatchPredictRewardSum = nonMatchPredictRewards.Sum();
    }
}
/// <summary>
/// Restores a network from a serialized model file on disk.
/// </summary>
/// <param name="mathManager">Math backend used to allocate the network's matrices.</param>
/// <param name="modelPath">Path to the serialized model.</param>
public NeuralNetwork(MathOperationManager mathManager, string modelPath)
{
    // Deserialize first, then build the architecture from the stored
    // configuration and copy the saved weights/biases into it.
    var store = LoadModel(modelPath);
    this.CreateNeuralNetwork(mathManager, store.Configuration);
    this.LoadFromNeuralNetworkStore(store);
}
/// <summary>
/// Creates a fully-connected link between two layers with randomly initialized weights.
/// </summary>
/// <param name="mathManager">Math backend used to allocate the weight and gradient matrices.</param>
/// <param name="layerIn">Source layer (its neuron count becomes the column dimension).</param>
/// <param name="layerOut">Destination layer (its neuron count becomes the row dimension).</param>
public NeuralLink(MathOperationManager mathManager, NeuralLayer layerIn, NeuralLayer layerOut)
{
    // Weights map layerIn activations to layerOut: (out neurons) x (in neurons).
    int outCount = layerOut.Data.Row;
    int inCount = layerIn.Data.Row;

    float[] randomWeights = ArrayUtilities.GetArrayWithRandomValues(outCount * inCount);
    this.Weights = mathManager.CreateMatrix(randomWeights, outCount, inCount);
    this.ErrorGradient = mathManager.CreateMatrix(outCount, inCount);
}
/// <summary>
/// Creates a fully-connected link between two layers with caller-supplied weights
/// (e.g. when restoring a saved model).
/// </summary>
/// <param name="mathManager">Math backend used to allocate the weight and gradient matrices.</param>
/// <param name="matrixData">Flat weight data; must contain (out neurons) * (in neurons) values.</param>
/// <param name="layerIn">Source layer (its neuron count becomes the column dimension).</param>
/// <param name="layerOut">Destination layer (its neuron count becomes the row dimension).</param>
public NeuralLink(MathOperationManager mathManager, float[] matrixData, NeuralLayer layerIn, NeuralLayer layerOut)
{
    // Weights map layerIn activations to layerOut: (out neurons) x (in neurons).
    int outCount = layerOut.Data.Row;
    int inCount = layerIn.Data.Row;

    this.Weights = mathManager.CreateMatrix(matrixData, outCount, inCount);
    this.ErrorGradient = mathManager.CreateMatrix(outCount, inCount);
}
/// <summary>
/// Shared initialization routine: stores the backend and configuration, resets training
/// counters, and builds the layer architecture.
/// </summary>
/// <param name="mathManager">Math backend used for all matrix allocations.</param>
/// <param name="configuration">Network topology and training hyperparameters.</param>
protected void CreateNeuralNetwork(MathOperationManager mathManager, NeuralNetworkConfiguration configuration)
{
    this.mathManager = mathManager;
    this.configuration = configuration;
    this.currentEpoch = 0;
    this.currentBatchSize = NeuralNetworkConfiguration.defaultBatchSize;
    this.neuralLayers = new List<NeuralLayer>();

    // Column vector of ones, one entry per batch example — presumably used to sum
    // per-example bias gradients across the batch via a matrix product; confirm at use site.
    float[] ones = Enumerable.Repeat<float>(1, this.currentBatchSize).ToArray();
    this.biasGradientAccumulator = this.mathManager.CreateMatrix(ones, this.currentBatchSize);

    this.CreateNetworkArchitecture();
}
/// <summary>
/// Creates a fresh (untrained) network from the given configuration.
/// </summary>
/// <param name="mathManager">Math backend used to allocate the network's matrices.</param>
/// <param name="configuration">Network topology and training hyperparameters.</param>
public NeuralNetwork(MathOperationManager mathManager, NeuralNetworkConfiguration configuration) { this.CreateNeuralNetwork(mathManager, configuration); }
/// <summary>
/// Creates a network from an in-memory serialized store: builds the architecture from the
/// stored configuration, then copies the stored weights and biases into it.
/// </summary>
/// <param name="mathManager">Math backend used to allocate the network's matrices.</param>
/// <param name="nnStore">Serialized network state (configuration plus parameters).</param>
public NeuralNetwork(MathOperationManager mathManager, NeuralNetworkStore nnStore) { this.CreateNeuralNetwork(mathManager, nnStore.Configuration); this.LoadFromNeuralNetworkStore(nnStore); }