public int NeuronOutput(int[] hopfieldOutput, int[,] patterns)
{
    int[] neuronSimilarityVector = new int[patterns.Rows()];

    // Map the bipolar {-1, +1} recall vector to the {0, 1} form used by the stored patterns.
    hopfieldOutput = hopfieldOutput.Add(1).Divide(2).Select(c => Convert.ToInt32(c.ToString())).ToArray();

    // Count, for every stored pattern, how many positions agree with the recalled vector.
    for (int index = 0; index < patterns.Rows(); index++)
    {
        int similarNeuronsCount = 0;
        for (int k = 0; k < patterns.Columns(); k++)
        {
            if (patterns[index, k] == hopfieldOutput[k])
            {
                similarNeuronsCount++;
            }
        }

        neuronSimilarityVector.Set(similarNeuronsCount, index);
    }

    // Return the index of the most similar stored pattern.
    return neuronSimilarityVector.IndexOf(neuronSimilarityVector.Max());
}
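// A minimal usage sketch for NeuronOutput (the values are made up for illustration):
// after the bipolar-to-binary remapping, the recalled vector matches stored pattern 0
// in all four positions, so the method returns index 0.
public void NeuronOutputUsageSketch()
{
    int[] recalled = { -1, 1, 1, -1 };                   // bipolar Hopfield recall output
    int[,] stored = { { 0, 1, 1, 0 }, { 1, 1, 1, 1 } };  // patterns stored in {0, 1} form
    int bestMatch = NeuronOutput(recalled, stored);      // expected: 0
}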
public void TrainByPseudoInverse(int[,] testData)
{
    testData = testData.Multiply(2).Subtract(1);  // remap {0, 1} into a matrix of -1 and 1
    int patternCount = testData.Rows();           // number of patterns
    neuronCount = testData.Columns();             // number of neurons per pattern
    double[,] W = new double[neuronCount, neuronCount]; // initialize the weight matrix (64x64 in the original data)

    for (int row = 0; row < patternCount; row++)
    {
        // Take the current pattern as a column vector x.
        double[,] x = testData.Get(row, row + 1, 0, neuronCount).Transpose().Convert(i => (double)i);

        var x1 = W.Dot(x).Subtract(x);
        var x1t = x1.Transpose();
        var licznik = x1.Dot(x1t);  // numerator: (W x - x)(W x - x)^T
        double mianownik = x.TransposeAndDot(x).Subtract(x.Transpose().Dot(W).Dot(x))[0, 0];  // denominator: x^T x - x^T W x

        W = W.Add(licznik.Divide(mianownik));
    }

    weights = W;
    trained = true;
}
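// A sketch of what the loop above computes, assuming `weights` and `neuronCount` are
// fields of the surrounding Hopfield-style class: for each stored pattern x (a column
// vector), the iterative pseudo-inverse (projection) rule
//
//     W <- W + (W x - x)(W x - x)^T / (x^T x - x^T W x)
//
// is applied, which converges to the projection matrix onto the span of the stored
// patterns and typically stores more patterns reliably than plain Hebbian learning.
// Note the denominator vanishes if a pattern lies in the span of earlier ones, so the
// illustrative patterns below are chosen linearly independent.
public void TrainByPseudoInverseUsageSketch()
{
    int[,] patterns = { { 1, 1, 0, 0 }, { 1, 0, 1, 0 } };  // made-up, independent patterns
    TrainByPseudoInverse(patterns);  // `weights` now holds the 4x4 projection matrix
}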
public void Train(int[,] data)
{
    patternsCount = data.Rows();
    neuronsCount = data.Columns();

    bw = new double[patternsCount, neuronsCount];  // bottom-up weights (W)
    tw = new double[patternsCount, neuronsCount];  // top-down weights (V)

    f1a = new int[neuronsCount];
    f1b = new int[neuronsCount];
    f2 = new double[patternsCount];

    // Initialize the top-down weight matrix V with ones.
    tw.Set(1);

    // Initialize the bottom-up weight matrix W.
    bw.Set(1.0 / (1.0 + neuronsCount));

    for (int row = 0; row < patternsCount; row++)
    {
        Console.Write("{0} ", Magic(data.GetRow(row), true));
    }

    trained = true;
}
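// Note on the initialization above (standard ART1 conventions, stated here as an
// assumption about this class): top-down weights start at 1 so that every F2 category
// node initially matches any input, and bottom-up weights start at 1 / (1 + n), which
// biases the choice function toward uncommitted nodes. Magic(), defined elsewhere in
// this class, presumably presents one pattern to the network and returns the index of
// the F2 category that won the competition and passed the vigilance test.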
/// <summary>
///   Constructs a new Confusion Matrix.
/// </summary>
///
public ConfusionMatrix(int[,] matrix)
{
    if (matrix.Rows() != 2 || matrix.Columns() != 2)
    {
        throw new DimensionMismatchException("matrix");
    }

    this.truePositives = matrix[0, 0];
    this.falseNegatives = matrix[0, 1];
    this.falsePositives = matrix[1, 0];
    this.trueNegatives = matrix[1, 1];
}
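// A short usage sketch for the constructor above, assuming the Accord.NET-style
// ConfusionMatrix with derived measures such as Accuracy:
public void ConfusionMatrixUsageSketch()
{
    var cm = new ConfusionMatrix(new int[,]
    {
        { 40,  5 },   // row 0: true positives, false negatives
        {  2, 53 },   // row 1: false positives, true negatives
    });

    // Accuracy = (TP + TN) / total = (40 + 53) / 100 = 0.93
    Console.WriteLine(cm.Accuracy);
}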
public static int[][] convertToJaggedArray(int[,] multiArray)
{
    int numOfColumns = multiArray.Columns();
    int numOfRows = multiArray.Rows();
    int[][] jaggedArray = new int[numOfRows][];

    for (int r = 0; r < numOfRows; r++)
    {
        jaggedArray[r] = new int[numOfColumns];
        for (int c = 0; c < numOfColumns; c++)
        {
            jaggedArray[r][c] = multiArray[r, c];
        }
    }

    return jaggedArray;
}
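// Usage sketch: the conversion preserves dimensions and element order.
public static void ConvertToJaggedArrayUsageSketch()
{
    int[,] rectangular = { { 1, 2, 3 }, { 4, 5, 6 } };
    int[][] jagged = convertToJaggedArray(rectangular);
    Console.WriteLine(jagged[1][0]);  // prints 4
}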
public override void NextInit(Platform platform)
{
    // (Re)build the allocation map if it is missing (e.g. lost during serialization).
    if (allocationMap == null)
    {
        if (GenerateAllocationMap(platform) != 0)
        {
            commandSequence.Clear();
            //ReplanLocal(platform, platform.FieldOfViewRadius * 2);
        }
    }
    else
    {
        // Check whether any cell allocated to this platform is still undiscovered.
        bool foundUndiscovered = false;
        for (int i = 0; i < allocationMap.Rows(); i++)
        {
            for (int j = 0; j < allocationMap.Columns(); j++)
            {
                Pose p = new Pose(i, j);
                if ((allocationMap[i, j] == platform.ID) && (!platform.Map.IsPlaceDiscovered(p, platform)))
                {
                    foundUndiscovered = true;
                    break;
                }
            }

            if (foundUndiscovered)
            {
                break;  // stop scanning rows as soon as one undiscovered cell is found
            }
        }

        // Everything allocated to this platform has been discovered: request a new allocation.
        if (!foundUndiscovered)
        {
            if (GenerateAllocationMap(platform) != 0)
            {
                //commandSequence.Clear();
                //ReplanLocal(platform, platform.FieldOfViewRadius * 2);
            }
        }
    }
}
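// In short, the method above regenerates the allocation map in exactly two cases: when
// no map exists yet (e.g. right after deserialization), or when this platform has
// already discovered every cell currently allocated to it. The commented-out
// ReplanLocal calls suggest local replanning was tried at those points and disabled.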
/// <summary>
///   This method should be implemented by inheriting classes to implement the
///   actual feature extraction, transforming the input image into a list of features.
/// </summary>
///
protected override IEnumerable<FeatureDescriptor> InnerTransform(UnmanagedImage image)
{
    // make sure we have a grayscale image
    UnmanagedImage grayImage = null;

    if (image.PixelFormat == PixelFormat.Format8bppIndexed)
    {
        grayImage = image;
    }
    else
    {
        // create temporary grayscale image
        grayImage = Grayscale.CommonAlgorithms.BT709.Apply(image);
    }

    // get source image size
    int width = grayImage.Width;
    int height = grayImage.Height;
    int stride = grayImage.Stride;
    int offset = stride - width;

    // 1. Calculate 8-pixel neighborhood binary patterns
    if (patterns == null || height > patterns.GetLength(0) || width > patterns.GetLength(1))
    {
        patterns = new int[height, width];
    }
    else
    {
        System.Diagnostics.Debug.Write(String.Format("Reusing storage for patterns. "
            + "Need ({0}, {1}), have ({2}, {3})", height, width, patterns.Rows(), patterns.Columns()));
    }

    unsafe
    {
        fixed (int* ptrPatterns = patterns)
        {
            // Begin by skipping the first line
            byte* src = (byte*)grayImage.ImageData.ToPointer() + stride;
            int* neighbors = ptrPatterns + width;

            // for each line
            for (int y = 1; y < height - 1; y++)
            {
                // skip first column
                neighbors++; src++;

                // for each inner pixel in line (skipping first and last)
                for (int x = 1; x < width - 1; x++, src++, neighbors++)
                {
                    // Retrieve the pixel neighborhood
                    byte a11 = src[+stride + 1], a12 = src[+1], a13 = src[-stride + 1];
                    byte a21 = src[+stride + 0], a22 = src[0], a23 = src[-stride + 0];
                    byte a31 = src[+stride - 1], a32 = src[-1], a33 = src[-stride - 1];

                    // Encode, bit by bit, which neighbors are brighter than the center
                    int sum = 0;
                    if (a22 < a11) sum += 1 << 0;
                    if (a22 < a12) sum += 1 << 1;
                    if (a22 < a13) sum += 1 << 2;
                    if (a22 < a21) sum += 1 << 3;
                    if (a22 < a23) sum += 1 << 4;
                    if (a22 < a31) sum += 1 << 5;
                    if (a22 < a32) sum += 1 << 6;
                    if (a22 < a33) sum += 1 << 7;

                    *neighbors = sum;
                }

                // Skip last column
                neighbors++; src += offset + 1;
            }
        }
    }

    // Free some resources which won't be needed anymore
    if (image.PixelFormat != PixelFormat.Format8bppIndexed)
    {
        grayImage.Dispose();
    }

    // 2. Compute cell histograms
    int cellCountX;
    int cellCountY;

    if (cellSize > 0)
    {
        cellCountX = (int)Math.Floor(width / (double)cellSize);
        cellCountY = (int)Math.Floor(height / (double)cellSize);

        if (histograms == null || cellCountX > histograms.Rows() || cellCountY > histograms.Columns())
        {
            this.histograms = new int[cellCountX, cellCountY][];
            for (int i = 0; i < cellCountX; i++)
                for (int j = 0; j < cellCountY; j++)
                    this.histograms[i, j] = new int[numberOfBins];
        }
        else
        {
            System.Diagnostics.Debug.Write(String.Format("Reusing storage for histograms. "
                + "Need ({0}, {1}), have ({2}, {3})", cellCountX, cellCountY, histograms.Rows(), histograms.Columns()));
        }

        // For each cell
        for (int i = 0; i < cellCountX; i++)
        {
            for (int j = 0; j < cellCountY; j++)
            {
                // Compute the histogram
                int[] histogram = this.histograms[i, j];

                int startCellX = i * cellSize;
                int startCellY = j * cellSize;

                // for each pixel in the cell
                for (int x = 0; x < cellSize; x++)
                    for (int y = 0; y < cellSize; y++)
                        histogram[patterns[startCellY + y, startCellX + x]]++;
            }
        }
    }
    else
    {
        cellCountX = 1;
        cellCountY = 1;

        if (histograms == null)
        {
            this.histograms = new int[,][] { { new int[numberOfBins] } };
        }
        else
        {
            System.Diagnostics.Debug.Write(String.Format("Reusing storage for histograms. "
                + "Need ({0}, {1}), have ({2}, {3})", cellCountX, cellCountY, histograms.Rows(), histograms.Columns()));
        }

        int[] histogram = this.histograms[0, 0];
        for (int i = 0; i < height; i++)
            for (int j = 0; j < width; j++)
                histogram[patterns[i, j]]++;
    }

    // 3. Group the cells into larger, normalized blocks
    int blocksCountX;
    int blocksCountY;

    if (blockSize > 0)
    {
        blocksCountX = (int)Math.Floor(cellCountX / (double)blockSize);
        blocksCountY = (int)Math.Floor(cellCountY / (double)blockSize);
    }
    else
    {
        blockSize = blocksCountX = blocksCountY = 1;
    }

    var blocks = new List<FeatureDescriptor>();

    for (int i = 0; i < blocksCountX; i++)
    {
        for (int j = 0; j < blocksCountY; j++)
        {
            double[] block = new double[blockSize * blockSize * numberOfBins];
            int startBlockX = i * blockSize;
            int startBlockY = j * blockSize;
            int c = 0;

            // for each cell in the block
            for (int x = 0; x < blockSize; x++)
            {
                for (int y = 0; y < blockSize; y++)
                {
                    int[] histogram = histograms[startBlockX + x, startBlockY + y];

                    // Copy all histograms to the block vector
                    for (int k = 0; k < histogram.Length; k++)
                        block[c++] = histogram[k];
                }
            }

            // TODO: Remove this block and instead propose a general architecture
            // for applying normalizations to descriptor blocks
            if (normalize)
                block.Divide(block.Euclidean() + epsilon, result: block);

            blocks.Add(block);
        }
    }

    return blocks;
}
public bool Classification(int[,] AddedAreaPoints, string ImageID)
{
    // Bitmap DestinationImage = new Bitmap(28, 28);  // to see the result of scaling
    Bitmap DestinationImage = new Bitmap(AddedAreaPoints.Rows(), AddedAreaPoints.Columns());
    int[,] DestinationPoints = new int[28, 28];       // to see the result of scaling

    // Nearest-neighbor downscaling of the extracted area onto the 28x28 grid the SVM expects.
    double XConvertor = (double)AddedAreaPoints.Rows() / 28;
    double YConvertor = (double)AddedAreaPoints.Columns() / 28;

    double[] input = new double[784];
    for (int i = 0; i < 28; i++)
    {
        for (int j = 0; j < 28; j++)
        {
            double XInSourceImage = XConvertor * i;
            double YInSourceImage = YConvertor * j;
            int X = (int)Math.Floor(XInSourceImage);
            int Y = (int)Math.Floor(YInSourceImage);
            input[i * 28 + j] = AddedAreaPoints[X, Y];
            DestinationPoints[i, j] = AddedAreaPoints[X, Y];
        }
    }

    // for (int i = 0; i < 28; i++)
    for (int i = 0; i < AddedAreaPoints.Rows(); i++)
    {
        // for (int j = 0; j < 28; j++)
        for (int j = 0; j < AddedAreaPoints.Columns(); j++)
        {
            // if (DestinationPoints[i, j] == 1)
            if (AddedAreaPoints[i, j] == 1)
            {
                DestinationImage.SetPixel(i, j, Color.Black);
            }
            else
            {
                DestinationImage.SetPixel(i, j, Color.White);
            }
        }
    }

    DestinationImage.Save(@"C:\Users\nhonarva\Documents\ResultsOfScaling\scaling" + ImageID + ".png");

    // Run Tesseract OCR over the saved image.
    var image = Pix.LoadFromFile(@"C:\Users\nhonarva\Documents\ResultsOfScaling\scaling" + ImageID + ".png");
    Page page = _engine.Process(image, PageSegMode.SingleBlock);
    string text = page.GetText();
    double confidence = page.GetMeanConfidence();
    page.Dispose();

    // The kernel SVM makes the actual binary decision on the 784-element input vector.
    int actual = (int)ksvm.Compute(input);
    return actual == 1;
}
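// Note on the resampling above: it is plain nearest-neighbor, mapping each target cell
// (i, j) of the 28x28 grid back to source cell (floor(i * rows / 28), floor(j * cols / 28)).
// The Tesseract pass (`_engine.Process`) computes `text` and `confidence`, but both are
// currently unused; only the kernel SVM decision (`ksvm.Compute`) determines the result.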
public void learn_test()
{
    #region doc_main
    // Fix the random number generator
    Accord.Math.Random.Generator.Seed = 0;

    // In this example, we will be using the QLearning algorithm
    // to make a robot learn how to navigate a map. The map is
    // shown below, where a 1 denotes a wall and 0 denotes areas
    // where the robot can navigate:
    //
    int[,] map =
    {
        { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
        { 1, 1, 0, 0, 0, 0, 0, 0, 1 },
        { 1, 1, 0, 0, 0, 1, 1, 0, 1 },
        { 1, 0, 0, 1, 0, 0, 0, 0, 1 },
        { 1, 0, 0, 1, 1, 1, 1, 0, 1 },
        { 1, 0, 0, 1, 1, 0, 0, 0, 1 },
        { 1, 1, 0, 1, 0, 0, 0, 0, 1 },
        { 1, 1, 0, 1, 0, 1, 1, 0, 1 },
        { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
    };

    // Now, we define the initial and target points from which the
    // robot will be spawned and where it should go, respectively:
    int agentStartX = 1;
    int agentStartY = 4;

    int agentStopX = 7;
    int agentStopY = 4;

    // The robot is able to sense the environment through 8 sensors
    // that capture whether the robot is near a wall or not. Based
    // on the robot's current location, the sensors will return an
    // integer number representing which sensors have detected walls:
    Func<int, int, int> getState = (int x, int y) =>
    {
        int c1 = (map[y - 1, x - 1] != 0) ? 1 : 0;
        int c2 = (map[y - 1, x + 0] != 0) ? 1 : 0;
        int c3 = (map[y - 1, x + 1] != 0) ? 1 : 0;
        int c4 = (map[y + 0, x + 1] != 0) ? 1 : 0;
        int c5 = (map[y + 1, x + 1] != 0) ? 1 : 0;
        int c6 = (map[y + 1, x + 0] != 0) ? 1 : 0;
        int c7 = (map[y + 1, x - 1] != 0) ? 1 : 0;
        int c8 = (map[y + 0, x - 1] != 0) ? 1 : 0;

        return c1 | (c2 << 1) | (c3 << 2) | (c4 << 3)
             | (c5 << 4) | (c6 << 5) | (c7 << 6) | (c8 << 7);
    };

    // The actions are the possible directions the robot can go:
    //
    //   - case 0: go to north (up)
    //   - case 1: go to east (right)
    //   - case 2: go to south (down)
    //   - case 3: go to west (left)
    //

    int learningIterations = 1000;
    double explorationRate = 0.5;
    double learningRate = 0.5;

    double moveReward = 0;
    double wallReward = -1;
    double goalReward = 1;

    // The function below specifies how the robot should perform an action given its
    // current position and an action number. This will cause the robot to update its
    // current X and Y locations given the direction (above) it was instructed to go:
    Func<int, int, int, Tuple<double, int, int>> doAction = (int currentX, int currentY, int action) =>
    {
        // default reward is equal to moving reward
        double reward = moveReward;

        // moving direction
        int dx = 0, dy = 0;

        switch (action)
        {
            case 0: // go to north (up)
                dy = -1;
                break;
            case 1: // go to east (right)
                dx = 1;
                break;
            case 2: // go to south (down)
                dy = 1;
                break;
            case 3: // go to west (left)
                dx = -1;
                break;
        }

        int newX = currentX + dx;
        int newY = currentY + dy;

        // check the new agent's coordinates (bounds first, so the map access is safe)
        if ((newX < 0) || (newX >= map.Columns()) || (newY < 0) || (newY >= map.Rows())
            || (map[newY, newX] != 0))
        {
            // we found a wall or got outside of the world
            reward = wallReward;
        }
        else
        {
            currentX = newX;
            currentY = newY;

            // check if we found the goal
            if ((currentX == agentStopX) && (currentY == agentStopY))
                reward = goalReward;
        }

        return Tuple.Create(reward, currentX, currentY);
    };

    // After defining all those functions, we create a new QLearning algorithm,
    // wrapping an epsilon-greedy policy inside a tabu-search policy:
    var explorationPolicy = new EpsilonGreedyExploration(explorationRate);
    var tabuPolicy = new TabuSearchExploration(4, explorationPolicy);
    var qLearning = new QLearning(256, 4, tabuPolicy);

    // current coordinates of the agent
    int agentCurrentX = -1;
    int agentCurrentY = -1;

    bool needToStop = false;
    int iteration = 0;

    // learning loop
    while ((!needToStop) && (iteration < learningIterations))
    {
        // set exploration rate for this iteration
        explorationPolicy.Epsilon = explorationRate - ((double)iteration / learningIterations) * explorationRate;

        // set learning rate for this iteration
        qLearning.LearningRate = learningRate - ((double)iteration / learningIterations) * learningRate;

        // clear tabu list
        tabuPolicy.ResetTabuList();

        // reset agent's coordinates to the starting position
        agentCurrentX = agentStartX;
        agentCurrentY = agentStartY;

        // steps performed by agent to get to the goal
        int steps = 0;

        while ((!needToStop) && ((agentCurrentX != agentStopX) || (agentCurrentY != agentStopY)))
        {
            steps++;

            // get agent's current state
            int currentState = getState(agentCurrentX, agentCurrentY);

            // get the action for this state
            int action = qLearning.GetAction(currentState);

            // update agent's current position and get his reward
            var r = doAction(agentCurrentX, agentCurrentY, action);
            double reward = r.Item1;
            agentCurrentX = r.Item2;
            agentCurrentY = r.Item3;

            // get agent's next state
            int nextState = getState(agentCurrentX, agentCurrentY);

            // do learning of the agent - update his Q-function
            qLearning.UpdateState(currentState, action, reward, nextState);

            // set tabu action
            tabuPolicy.SetTabuAction((action + 2) % 4, 1);
        }

        System.Diagnostics.Debug.WriteLine(steps);

        iteration++;
    }

    // The end position for the robot will be (7, 4):
    int finalPosX = agentCurrentX; // 7
    int finalPosY = agentCurrentY; // 4
    #endregion

    Assert.AreEqual(7, finalPosX);
    Assert.AreEqual(4, finalPosY);
}
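// Note on the schedule above: both the exploration rate (epsilon) and the learning rate
// are annealed linearly from 0.5 towards 0 over the 1000 iterations, and the tabu policy
// temporarily forbids the immediately-reversing action ((action + 2) % 4) for one step,
// which discourages the agent from undoing its last move and oscillating in place.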