/// <summary>
/// Builds the training design matrix A (leading bias column of 1s followed by
/// the nFeature inputs), the target vector Y (last column of trainData), and
/// precomputes A-transpose and A^T*A for the normal-equation ridge solve.
/// </summary>
private void CalculateMatrixTrain()
{
    trainY = MatrixCal.MatrixCreate(nTrain, 1);
    for (int i = 0; i < nTrain; i++)
    {
        trainA[i] = new double[nFeature + 1];
        trainA[i][0] = 1; // bias / intercept term
        // Copy the nFeature inputs. This loop was hard-coded to 8, which
        // silently breaks whenever nFeature changes; use nFeature instead.
        for (int j = 0; j < nFeature; j++)
        {
            trainA[i][j + 1] = trainData[i][j];
        }
        // Label is the column immediately after the features.
        trainY[i][0] = trainData[i][nFeature];
    }
    trainAT = MatrixCal.MatrixTranspose(trainA);
    trainATA = MatrixCal.MatrixProduct(trainAT, trainA);
}
/// <summary>
/// Fits ridge regression with the best lambda found during validation
/// (lamdaVal[nMinIndex]), predicts each test sample, stores the predictions
/// in testY, and shows them (one per line) in labelResult.
/// </summary>
private void CalculateResult()
{
    CalculateRidgeRegression(lamdaVal[nMinIndex]);
    testY = MatrixCal.MatrixCreate(nTest, 1);
    // Accumulate the display text once. The original appended to
    // labelResult.Text inside the loop, which is O(n^2) string concatenation
    // and touches the UI control on every iteration.
    var sb = new System.Text.StringBuilder();
    for (int i = 0; i < nTest; i++)
    {
        // Wrap row i of testA as a 1 x (nFeature + 1) matrix so it can be
        // multiplied by the weight column vector.
        double[][] testAi = new double[1][];
        testAi[0] = new double[nFeature + 1];
        for (int j = 0; j < nFeature + 1; j++)
        {
            testAi[0][j] = testA[i][j];
        }
        double[][] testAiw = MatrixCal.MatrixProduct(testAi, matrixWeight);
        testY[i][0] = testAiw[0][0];
        // Same output format as before: value followed by "\n".
        sb.Append(testY[i][0].ToString()).Append('\n');
    }
    labelResult.Text = sb.ToString();
}
/// <summary>
/// Records the lambda used for the current fit and the mean-squared-error of
/// the current weight vector on both the training and validation sets.
/// </summary>
/// <param name="ld">Regularization strength (lambda) used for this fit.</param>
private void CalculateLoss(double ld)
{
    lamdaVal.Add(ld);
    // The train-loss and validation-loss loops were verbatim duplicates;
    // both now share a single MSE helper.
    trainLossVal.Add(MeanSquaredError(trainA, trainY, nTrain));
    valLossVal.Add(MeanSquaredError(validA, validY, nVal));
}

/// <summary>
/// Mean squared error of predictions A*w against targets Y over n rows,
/// using the current matrixWeight.
/// </summary>
/// <param name="a">Design matrix; each row has nFeature + 1 columns.</param>
/// <param name="y">Target column vector (n x 1).</param>
/// <param name="n">Number of rows to evaluate.</param>
/// <returns>Sum of squared residuals divided by n.</returns>
private double MeanSquaredError(double[][] a, double[][] y, int n)
{
    double loss = 0;
    for (int i = 0; i < n; i++)
    {
        // Treat row i as a 1 x (nFeature + 1) matrix so MatrixProduct can
        // multiply it by the weight column vector.
        double[][] row = new double[1][];
        row[0] = new double[nFeature + 1];
        for (int j = 0; j < nFeature + 1; j++)
        {
            row[0][j] = a[i][j];
        }
        double[][] pred = MatrixCal.MatrixProduct(row, matrixWeight);
        loss += Math.Pow(y[i][0] - pred[0][0], 2);
    }
    return loss / (double)n;
}