Пример #1
0
    // Converts a heatmap-cell part to image-space coordinates: scale the cell
    // by the output stride, then refine with the stored sub-pixel offset.
    Vector2 GetImageCoords(
        Part part, int outputStride, float[,,,] offsets)
    {
        var offset = GetOffsetPoint(part.heatmapY, part.heatmapX,
                                    part.id, offsets);

        float imageX = (float)(part.heatmapX * outputStride) + offset.x;
        float imageY = (float)(part.heatmapY * outputStride) + offset.y;
        return new Vector2(imageX, imageY);
    }
Пример #2
0
        /// <summary>
        /// Receive an image from camera, run the DNN detector on it, and draw a
        /// labelled bounding box for every detection above the confidence threshold.
        /// </summary>
        /// <param name="sender">Event source (unused).</param>
        /// <param name="e">Event arguments (unused).</param>
        private void Camera_ImageGrabbed(object sender, EventArgs e)
        {
            camera.Retrieve(frame);
            Mat blobs = DnnInvoke.BlobFromImage(frame, 1.0, new System.Drawing.Size(detectionSize, detectionSize), swapRB: true);

            net.SetInput(blobs);
            Mat outp = net.Forward();

            // The `as` cast yields null when the output is not a 4-D float blob;
            // guard instead of throwing inside the event handler.
            float[,,,] boxes = outp.GetData() as float[, , , ];
            if (boxes != null)
            {
                for (int i = 0; i < boxes.GetLength(2); i++)
                {
                    int classID = Convert.ToInt32(boxes[0, 0, i, 1]);

                    // FIX: the values are already floats — read them directly.
                    // The old ToString()/Replace(",", ".")/parse round-trip was
                    // slow and could lose precision (pre-.NET Core float
                    // ToString does not round-trip by default).
                    float confidence = boxes[0, 0, i, 2];

                    if (confidence < 0.6)
                    {
                        continue;
                    }

                    // Detection coordinates are normalised [0..1]; scale to pixels.
                    float Xstart = boxes[0, 0, i, 3] * resolutionX;
                    float Ystart = boxes[0, 0, i, 4] * resolutionY;
                    float Xend   = boxes[0, 0, i, 5] * resolutionX;
                    float Yend   = boxes[0, 0, i, 6] * resolutionY;

                    System.Drawing.Rectangle rect = new System.Drawing.Rectangle
                    {
                        X      = (int)Xstart,
                        Y      = (int)Ystart,
                        Height = (int)(Yend - Ystart),
                        Width  = (int)(Xend - Xstart)
                    };

                    // NOTE(review): assumes model class IDs are 1-based relative
                    // to `labels` — confirm against the label file.
                    string label = labels[classID - 1];

                    frame.Draw(rect, new Bgr(0, 255, 0), 2);
                    // Filled backdrop sized roughly to the label text, then the text itself.
                    frame.Draw(new System.Drawing.Rectangle((int)Xstart,
                                                            (int)Ystart - 35, label.Length * 18, 35), new Bgr(0, 255, 0), -1);
                    CvInvoke.PutText(frame, label, new System.Drawing.Point((int)Xstart,
                                                                            (int)Ystart - 10), FontFace.HersheySimplex, 1.0, new MCvScalar(0, 0, 0), 2);
                }
            }

            // Marshal the UI update back onto the dispatcher thread.
            Dispatcher.Invoke(new Action(() =>
            {
                img.Source = frame.Bitmap.BitmapToBitmapSource();
            }));
        }
Пример #3
0
        /// <summary>
        /// Separable convolution: a depthwise pass with <paramref name="kernel1"/>
        /// followed by a pointwise convolution with <paramref name="kernel2"/>.
        /// </summary>
        /// <param name="kernel1">Depthwise kernel [kW, kH, channels, multiplier].</param>
        /// <param name="kernel2">Pointwise kernel applied to the depthwise output.</param>
        /// <param name="bias">Bias applied by the pointwise stage.</param>
        /// <param name="src">Input tensor [W, H, depth].</param>
        /// <param name="IsPaddingSame">True for SAME padding, false for VALID.</param>
        /// <param name="strideX">Horizontal stride.</param>
        /// <param name="strideY">Vertical stride.</param>
        /// <returns>The convolved tensor.</returns>
        protected float[,,] SeparableConv2D(float[,,,] kernel1, float[,,,] kernel2, float[] bias, float[,,] src, bool IsPaddingSame = false, int strideX = 1, int strideY = 1)
        {
            // Removed dead locals (kernelW/kernelH/filterCount were never read).
            // The depthwise stage runs with a zero bias; the real bias is applied
            // once, in the pointwise stage.
            int filterMultiplier = kernel1.GetLength(3);

            float[,,] depthwise = DepthwiseConv2D(kernel1, new float[filterMultiplier], src, IsPaddingSame, strideX, strideY);
            // NOTE(review): the stride is passed to both stages, as in the
            // original; callers use the default stride of 1 where this matters.
            float[,,] pointwise = Conv2d(kernel2, bias, depthwise, IsPaddingSame, strideX, strideY);

            return(pointwise);
        }
Пример #4
0
 // Unity lifecycle hook: allocates the weight/activation buffers for every
 // candidate network and seeds each candidate with random weights.
 void Start()
 {
     // Per-candidate hidden-layer weights and per-neuron activations.
     HidenNeuronePoids      = new float[NBNe, NBC, Ne, Ne];
     HidenNeuronePerceptron = new float[NBNe, NBC, Ne];
     // Best network found so far (no candidate dimension).
     MeilleurePoids         = new float[NBC, Ne, Ne];
     MeilleurePerceptron    = new float[NBC, Ne];
     ScoreZero = new float[NBNe];
     // NOTE(review): `s` is not declared here, so it must be a field; this loop
     // leaves it equal to NBNe afterwards — confirm nothing else relies on that.
     for (s = 0; s < NBNe; s++)
     {
         RandomizeAllPoids(s);
     }
 }
Пример #5
0
    /// <summary>
    /// Builds the weekly bar graph for the selected attribute/month/year, or
    /// shows a "no data" message when every week of the month is empty.
    /// </summary>
    /// <param name="parametro">Display name of the plotted parameter.</param>
    /// <param name="atributo">Index of the sensor attribute to plot.</param>
    /// <param name="corHex">Bar colour as an HTML hex string.</param>
    public void GraphSemanal(string parametro, int atributo, string corHex)
    {
        UIManager.HideRightOther();
        UIManager.HideLeftInformation();

        // Freshly created, so the old redundant Clear() was dropped.
        List <XYBarValues> listXY = new List <XYBarValues>();

        Color color = new Color();
        ColorUtility.TryParseHtmlString(corHex, out color);

        int countNanS = 0;
        // Years are stored relative to 2020 in the data tensor.
        int Ano = int.Parse(GetYear()) - 2020;

        // Hoisted out of the loop: the tensor does not change between weeks.
        float[,,,] mediaSemanalMensal = Sensor.getInstance().GetMediaSemanalMensal();

        for (int i = 0; i < 4; i++)
        {
            float valor = mediaSemanalMensal[i, indexMonth, atributo, Ano];

            // Zero or NaN means "no reading for that week". The old `else if`
            // tested exactly the negation of this condition, so plain `else`
            // is equivalent.
            if (valor != 0 && !float.IsNaN(valor))
            {
                float media = (float)Math.Round(valor, 1);
                listXY.Add(new XYBarValues(semanaNome[i], media));
                debugText.text = "";
            }
            else
            {
                countNanS++;
            }
        }

        if (countNanS <= 3)
        {
            CreateGraphFromData(listXY, parametro, color);
            chooseAnim = true;
        }
        else
        {
            debugText.text = "Não contem dados deste mês";
            chooseAnim     = false;
        }
    }
Пример #6
0
        /// <summary>
        /// Runs the SSD face detector on <paramref name="image"/> and sorts the
        /// resulting boxes into faces fully inside the image versus faces that
        /// extend past the image border.
        /// </summary>
        /// <param name="image">Input image.</param>
        /// <param name="fullFaceRegions">Receives boxes fully contained in the image.</param>
        /// <param name="partialFaceRegions">Receives boxes clipped by the image edge.</param>
        public void Detect(Mat image, List <Rectangle> fullFaceRegions, List <Rectangle> partialFaceRegions)
        {
            int       imgDim    = 300;
            // Mean values subtracted by the model's preprocessing.
            MCvScalar meanVal   = new MCvScalar(104, 177, 123);
            Size      imageSize = image.Size;

            // NOTE(review): this `using` covers only the SetInput statement, so
            // inputBlob is disposed before Forward() runs — presumably SetInput
            // copies the data; confirm against the Emgu CV documentation.
            using (Mat inputBlob = DnnInvoke.BlobFromImage(
                       image,
                       1.0,
                       new Size(imgDim, imgDim),
                       meanVal,
                       false,
                       false))
                _faceDetector.SetInput(inputBlob, "data");
            using (Mat detection = _faceDetector.Forward("detection_out"))
            {
                float confidenceThreshold = 0.5f;

                //List<Rectangle> fullFaceRegions = new List<Rectangle>();
                //List<Rectangle> partialFaceRegions = new List<Rectangle>();
                Rectangle imageRegion = new Rectangle(Point.Empty, image.Size);

                // Detection rows are read as [0, 0, i, col]: col 2 is scored
                // against the threshold, cols 3..6 are scaled as normalised
                // box corners below.
                float[,,,] values = detection.GetData(true) as float[, , , ];
                for (int i = 0; i < values.GetLength(2); i++)
                {
                    float confident = values[0, 0, i, 2];

                    if (confident > confidenceThreshold)
                    {
                        // Scale normalised corners to pixel coordinates.
                        float      xLeftBottom  = values[0, 0, i, 3] * imageSize.Width;
                        float      yLeftBottom  = values[0, 0, i, 4] * imageSize.Height;
                        float      xRightTop    = values[0, 0, i, 5] * imageSize.Width;
                        float      yRightTop    = values[0, 0, i, 6] * imageSize.Height;
                        RectangleF objectRegion = new RectangleF(
                            xLeftBottom,
                            yLeftBottom,
                            xRightTop - xLeftBottom,
                            yRightTop - yLeftBottom);
                        Rectangle faceRegion = Rectangle.Round(objectRegion);

                        // Fully-contained boxes are "full" faces; clipped ones "partial".
                        if (imageRegion.Contains(faceRegion))
                        {
                            fullFaceRegions.Add(faceRegion);
                        }
                        else
                        {
                            partialFaceRegions.Add(faceRegion);
                        }
                    }
                }
            }
        }
    // Decodes a full pose from a single root detection by walking the part
    // tree in both directions and following the learned displacement fields.
    Keypoint[] DecodePose(PartWithScore root, float[,,,] scores, float[,,,] offsets,
                          int outputStride, float[,,,] displacementsFwd,
                          float[,,,] displacementsBwd)
    {
        // One slot per body part; unfilled slots keep the default score of 0.
        var keypoints = new Keypoint[scores.GetLength(3)];

        // Seed the skeleton with the root detection.
        var rootPart = root.part;
        keypoints[rootPart.id] = new Keypoint(
            root.score,
            GetImageCoords(rootPart, outputStride, offsets),
            partNames[rootPart.id]
            );

        var edgeCount = parentToChildEdges.Length;

        // Pass 1: walk the tree upwards using the backward displacements.
        for (var edgeId = edgeCount - 1; edgeId >= 0; --edgeId)
        {
            var sourceId = parentToChildEdges[edgeId];
            var targetId = childToParentEdges[edgeId];
            // Only traverse from a decoded keypoint to an undecoded one.
            if (keypoints[sourceId].score <= 0.0f || keypoints[targetId].score != 0.0f)
            {
                continue;
            }
            keypoints[targetId] = TraverseToTargetKeypoint(
                edgeId, keypoints[sourceId], targetId, scores,
                offsets, outputStride, displacementsBwd);
        }

        // Pass 2: walk the tree downwards using the forward displacements.
        for (var edgeId = 0; edgeId < edgeCount; ++edgeId)
        {
            var sourceId = childToParentEdges[edgeId];
            var targetId = parentToChildEdges[edgeId];
            if (keypoints[sourceId].score <= 0.0f || keypoints[targetId].score != 0.0f)
            {
                continue;
            }
            keypoints[targetId] = TraverseToTargetKeypoint(
                edgeId, keypoints[sourceId], targetId, scores,
                offsets, outputStride, displacementsFwd);
        }

        return keypoints;
    }
Пример #8
0
        /// <summary>
        /// Transposed 2-D convolution ("deconvolution"): every input pixel
        /// scatters its value, weighted by the kernel, into a stride-upsampled
        /// output initialised with the per-filter bias.
        /// </summary>
        /// <param name="kernel">Kernel tensor [kW, kH, inputChannels, filters].</param>
        /// <param name="bias">Per-filter bias.</param>
        /// <param name="strideX">Horizontal upsampling factor.</param>
        /// <param name="strideY">Vertical upsampling factor.</param>
        /// <param name="src">Input tensor [W, H, channels].</param>
        /// <returns>Output tensor [W * strideX, H * strideY, filters].</returns>
        protected float[,,] Conv2DTr(float[,,,] kernel, float[] bias, int strideX, int strideY, float[,,] src)
        {
            int srcW     = src.GetLength(0);
            int srcH     = src.GetLength(1);
            int channels = src.GetLength(2);

            int kernelW     = kernel.GetLength(0);
            int kernelH     = kernel.GetLength(1);
            int filterCount = kernel.GetLength(3);

            int outW = srcW * strideX;
            int outH = srcH * strideY;

            // Start every output cell at its filter's bias.
            float[,,] result = new float[outW, outH, filterCount];
            for (int ox = 0; ox < outW; ox++)
            {
                for (int oy = 0; oy < outH; oy++)
                {
                    for (int f = 0; f < filterCount; f++)
                    {
                        result[ox, oy, f] = bias[f];
                    }
                }
            }

            // Scatter-accumulate: each source pixel contributes a kernel-sized
            // patch anchored at (x * strideX, y * strideY).
            // NOTE(review): indices exceed the output bounds when the kernel is
            // larger than the stride — TODO confirm callers keep kernel <= stride.
            for (int x = 0; x < srcW; x++)
            {
                for (int y = 0; y < srcH; y++)
                {
                    for (int f = 0; f < filterCount; f++)
                    {
                        for (int d = 0; d < channels; d++)
                        {
                            float sourceValue = src[x, y, d];
                            for (int kx = 0; kx < kernelW; kx++)
                            {
                                for (int ky = 0; ky < kernelH; ky++)
                                {
                                    result[x * strideX + kx, y * strideY + ky, f] += sourceValue * kernel[kx, ky, d, f];
                                }
                            }
                        }
                    }
                }
            }

            return result;
        }
Пример #9
0
        /// <summary>
        /// Detects faces in each image of the list using an SSD TensorFlow model.
        /// </summary>
        /// <param name="imagePaths">Paths of the images to scan.</param>
        /// <returns>
        /// One entry per image; each entry is an array of {left, top, width, height}
        /// boxes for the faces found in that image.
        /// </returns>
        public List <int[][]> DetectFacesSDD(List <string> imagePaths)
        {
            List <int[][]> allFaces = new List <int[][]>();
            int count = 0;

            // FIX: the network is identical for every image — load it once
            // instead of re-reading the model from disk on every iteration,
            // and dispose it (it was previously leaked).
            using (Net net = DnnInvoke.ReadNetFromTensorflow(_modelFile, _configFile))
            {
                // Find faces for each image.
                foreach (var file in imagePaths)
                {
                    List <int[]> faces = new List <int[]>();
                    using (Image <Bgr, byte> image = new Image <Bgr, byte>(file))
                    {
                        int cols = image.Width;
                        int rows = image.Height;

                        net.SetInput(DnnInvoke.BlobFromImage(image.Mat, 1, new System.Drawing.Size(300, 300), default(MCvScalar), false, false));

                        using (Mat mat = net.Forward())
                        {
                            float[,,,] flt = (float[, , , ])mat.GetData();

                            for (int x = 0; x < flt.GetLength(2); x++)
                            {
                                // Column 2 holds the detection confidence.
                                if (flt[0, 0, x, 2] > 0.2)
                                {
                                    int left   = Convert.ToInt32(flt[0, 0, x, 3] * cols);
                                    int top    = Convert.ToInt32(flt[0, 0, x, 4] * rows);
                                    // Despite the names, these are width/height (end minus start).
                                    int right  = Convert.ToInt32(flt[0, 0, x, 5] * cols) - left;
                                    int bottom = Convert.ToInt32(flt[0, 0, x, 6] * rows) - top;

                                    faces.Add(new[] { left, top, right, bottom });
                                }
                            }
                        }
                    }

                    allFaces.Add(faces.ToArray());
                    Console.WriteLine(count);
                    count++;
                }
            }

            return(allFaces);
        }
Пример #10
0
 /// <summary>
 /// Fills the Worley cell-point texture with one random feature point per
 /// cell: three random components in [0, 1], stored in channels 0..2.
 /// </summary>
 /// <param name="wCellPoint">Cell-point texture [x, y, z, component].</param>
 /// <param name="div">Cell size divisor.</param>
 /// <param name="tSize">Texture size; tSize / div cells per axis.</param>
 public void SetWorleyPoint(ref float[,,,] wCellPoint, int div, int tSize)
 {
     int cells = tSize / div;

     for (int cx = 0; cx < cells; cx++)
     {
         for (int cy = 0; cy < cells; cy++)
         {
             for (int cz = 0; cz < cells; cz++)
             {
                 // Same draw order as before: three Random.value calls per cell.
                 for (int comp = 0; comp < 3; comp++)
                 {
                     wCellPoint[cx, cy, cz, comp] = Random.value;
                 }
             }
         }
     }
 }
Пример #11
0
        /// <summary>
        /// Returns the row index with the highest confidence value (column 2)
        /// among the detections in <paramref name="data"/>.
        /// </summary>
        /// <param name="detection">Detection Mat; its third dimension gives the row count.</param>
        /// <param name="data">Detection values indexed as [0, 0, row, column].</param>
        /// <returns>Index of the best-scoring row (0 when all scores are &lt;= 0).</returns>
        private static int GetMaxConfidenceIdx(Mat detection, float[,,,] data)
        {
            var bestIndex = 0;
            var bestScore = 0f;

            int rowCount = detection.SizeOfDimension[2];
            for (int row = 0; row < rowCount; row++)
            {
                float score = data[0, 0, row, 2];
                if (score > bestScore)
                {
                    bestIndex = row;
                    bestScore = score;
                }
            }

            return bestIndex;
        }
Пример #12
0
 /// <summary>
 /// Restructures the filter for matrix multiplication: each filter becomes
 /// one column of <c>filterUnfolded</c>, with (depth, kernel row, kernel
 /// column) linearised into the row index.
 /// </summary>
 /// <param name="filter">
 /// The filter matrix, indexed as [filter, depth, kernelRow, kernelColumn].
 /// </param>
 private void UnfoldConvolutionFilter(float[,,,] filter)
 {
     for (var f = 0; f < this.filterCount; f++)
     {
         for (var d = 0; d < this.inputDepth; d++)
         {
             for (var row = 0; row < this.kernelHeight; ++row)
             {
                 for (var col = 0; col < this.kernelWidth; ++col)
                 {
                     // Row-major linearisation of (depth, row, column).
                     var unfoldedRow = (((d * this.kernelHeight) + row) * this.kernelWidth) + col;
                     this.filterUnfolded[unfoldedRow, f] = filter[f, d, row, col];
                 }
             }
         }
     }
 }
Пример #13
0
    // Runs the TF decoder model, then rebuilds the mesh data: vertex heights
    // come from the network's output image, triangles form a regular grid.
    void CreateShape()
    {
        // Run the model
        var runner = session.GetRunner();

        runner.AddInput(graph["decoder_input"][0], inputData);
        runner.Fetch(graph["decoder_output/Sigmoid"][0]);
        // Fetched as a 4-D float tensor; the `as` cast yields null on mismatch.
        float[,,,] newImage = runner.Run()[0].GetValue() as float[, , , ];

        // Debug.Log("new image:"+newImage.Length);

        // One vertex per grid corner: (xSize + 1) * (ySize + 1) in total.
        vertices = new Vector3[(xSize + 1) * (ySize + 1)];

        int i = 0;

        for (int y = 0; y < ySize + 1; y++)
        {
            for (int x = 0; x < xSize + 1; x++)
            {
                // XY normalised to [0, 1]; Z sampled from the network output,
                // clamping border corners onto the last valid pixel.
                vertices[i] = new Vector3((float)x / xSize, (float)y / ySize, newImage[0, Mathf.Clamp(x, 0, xSize - 1), Mathf.Clamp(y, 0, ySize - 1), 0]);

                i++;
            }
        }

        // Two triangles (six indices) per grid cell.
        triangles = new int[xSize * ySize * 6];

        int vert = 0;
        int tris = 0;

        for (int y = 0; y < ySize; y++)
        {
            for (int x = 0; x < xSize; x++)
            {
                triangles[tris + 0] = vert + 0;
                triangles[tris + 1] = vert + xSize + 1;
                triangles[tris + 2] = vert + 1;
                triangles[tris + 3] = vert + 1;
                triangles[tris + 4] = vert + xSize + 1;
                triangles[tris + 5] = vert + xSize + 2;

                vert++;
                tris += 6;
            }
            // Skip the last vertex of each row so no quad spans two rows.
            vert++;
        }
    }
Пример #14
0
 // Asserts that every element of `arr` matches the corresponding texel of
 // `data` to within `error` (absolute difference), across all channels.
 void EqualsWithError(float[,,,] arr, TextureData3D data, float error)
 {
     for (int depth = 0; depth < data.GetDepth(); depth++)
     {
         for (int row = 0; row < data.GetHeight(); row++)
         {
             for (int col = 0; col < data.GetWidth(); col++)
             {
                 for (int chan = 0; chan < data.Channels; chan++)
                 {
                     float expected = arr[col, row, depth, chan];
                     float actual   = data[col, row, depth, chan];
                     float diff     = Math.Abs(expected - actual);
                     Assert.IsTrue(diff <= error, expected + " and " + actual + " have a error of " + diff);
                 }
             }
         }
     }
 }
Пример #15
0
 /// <summary>
 /// Fills the Perlin vertex texture with a random offset vector per cell,
 /// sampled inside the unit sphere.
 /// </summary>
 /// <param name="pVtx">Vertex texture [x, y, z, component].</param>
 /// <param name="div">Cell size divisor.</param>
 /// <param name="tSize">Texture size; tSize / div cells per axis.</param>
 void SetPerlinVtx(ref float[,,,] pVtx, int div, int tSize)
 {
     for (int x = 0; x < tSize / div; x++)
     {
         for (int y = 0; y < tSize / div; y++)
         {
             for (int z = 0; z < tSize / div; z++)
             {
                 // BUG FIX: `1 / 3` was integer division (== 0), so Pow(...)
                 // always returned 1 and every radius was exactly 1. The cube
                 // root (exponent 1f / 3f) gives a radius distribution that is
                 // uniform over the sphere's volume.
                 // NOTE: r, phi and theta are fields, not locals (kept as-is).
                 r                = Mathf.Pow(Random.value, 1f / 3f);
                 phi              = Random.value * 6.2832f;
                 theta            = Random.value * 3.1416f;
                 pVtx[x, y, z, 0] = Mathf.Sin(phi) * Mathf.Cos(theta) * r;
                 pVtx[x, y, z, 1] = Mathf.Sin(phi) * Mathf.Sin(theta) * r;
                 pVtx[x, y, z, 2] = Mathf.Cos(phi) * r;
             }
         }
     }
 }
Пример #16
0
        // Deserialization constructor: restores the map geometry and the
        // per-modality weight tensors from the serialization stream.

        public CVZ_MMCM(SerializationInfo info, StreamingContext ctxt)
            : base(info, ctxt)
        {
            //Get the values from info and assign them to the appropriate properties
            height      = (int)info.GetValue("Height", typeof(int));
            width       = (int)info.GetValue("Width", typeof(int));
            layers      = (int)info.GetValue("Layers", typeof(int));
            weights     = new Dictionary <string, float[, , , ]>();
            mapActivity = new float[width, height, layers];
            CreateHierarchicalModality(3, 0.0f, false);      // NOTE(review): original comment flagged a potential bug with hierarchical synchro here — confirm.
            int modalitiesCount = (int)info.GetValue("ModalitiesCount", typeof(int));

            // Each modality's weight tensor was serialized as "ModalityWeights<i>";
            // re-associate it with the modality key at the same index.
            for (int i = 0; i < modalitiesCount; i++)
            {
                float[, , ,] w = (float[, , , ])info.GetValue("ModalityWeights" + i, typeof(float[, , , ]));
                weights.Add(modalities.ElementAt(i).Key, w);
            }
        }
Пример #17
0
    /// <summary>
    /// Compute the euclidean distance between each pair of nodes
    /// </summary>
    private void ComputeDistances()
    {
        Distances = new float[gridSize.x, gridSize.y, gridSize.x, gridSize.y];

        // Fill Distances[ax, ay, bx, by] with the distance from node (ax, ay)
        // to node (bx, by); all pairs are computed, including the symmetric half.
        for (int ax = 0; ax < gridSize.x; ax++)
        {
            for (int ay = 0; ay < gridSize.y; ay++)
            {
                for (int bx = 0; bx < gridSize.x; bx++)
                {
                    for (int by = 0; by < gridSize.y; by++)
                    {
                        Distances[ax, ay, bx, by] = Vector2Int.Distance(grid[ax, ay].position, grid[bx, by].position);
                    }
                }
            }
        }
    }
Пример #18
0
        // Copies one image into a [1, width, height, 1] float slice.
        // Only Rgba32 images are supported; other pixel formats throw.
        private static void WriteImageToSlice(Image image, float[,,,] slice)
        {
            switch (image)
            {
            case Image <Rgba32> r_image:
                for (int y = 0; y < image.Height; ++y)
                {
                    var line = r_image.GetPixelRowSpan(y);
                    for (int x = 0; x < image.Width; ++x)
                    {
                        // Only the green channel is stored — presumably the
                        // input is grayscale (R == G == B); TODO confirm.
                        slice[0, x, y, 0] = line[x].G;
                    }
                }
                return;

            default:
                throw new NotImplementedException("Not implemented yet");
            }
        }
Пример #19
0
    /// <summary>
    /// Debug-draws each joint's heatmap as a tiled grid of grayscale line
    /// strips, followed by one combined (summed over joints) heatmap tile.
    /// </summary>
    /// <param name="heatmapBuff">Heatmap values indexed as [y, x, joint, type].</param>
    /// <param name="height">Heatmap height in cells.</param>
    /// <param name="width">Heatmap width in cells.</param>
    /// <param name="jointCount">Number of joints to draw.</param>
    public void DebugDrawHeatmapBuff(float[,,,] heatmapBuff, int height, int width, int jointCount)
    {
        int heatmapType = 0;
        int slide       = 70; // tile spacing; about right since nnInputWidth is 46
        DrawBegin();
        Color color = Color.black;

        // One tile per joint, laid out 5 per row ((j % 5), (j / 5)).
        for (int j = 0; j < jointCount; ++j)
        {
            for (int y = 0; y < height; ++y)
            {
                GL.Begin(GL.LINE_STRIP);
                for (int x = 0; x < width; ++x)
                {
                    // Clamp the heatmap value to [0, 1] and draw it as gray.
                    float v = Mathf.Min(1.0f, Mathf.Max(0.0f, heatmapBuff[y, x, j, heatmapType]));
                    color.r = color.g = color.b = v;
                    GL.Color(color);
                    GL.Vertex3((x + (j % 5) * slide) * displayScale, (y + (j / 5) * slide) * displayScale, 0f);
                }
                GL.End();
            }
        }

        // Extra tile at grid position (4, 4): all joints summed, clamped to [0, 1].
        for (int y = 0; y < height; ++y)
        {
            GL.Begin(GL.LINE_STRIP);
            for (int x = 0; x < width; ++x)
            {
                float v = 0;
                for (int j = 0; j < jointCount; ++j)
                {
                    v += heatmapBuff[y, x, j, heatmapType];
                }
                color.r = color.g = color.b = Mathf.Min(1.0f, Mathf.Max(0.0f, v));
                GL.Color(color);
                GL.Vertex3((x + 4 * slide) * displayScale, (y + 4 * slide) * displayScale, 0f);
            }
            GL.End();
        }
        DrawEnd();
    }
Пример #20
0
        /// <summary>
        /// Allocates all buffers for a 2-D convolution layer and initialises
        /// the filter weights randomly.
        /// </summary>
        /// <param name="depth">Input channel count.</param>
        /// <param name="height">Input height.</param>
        /// <param name="width">Input width.</param>
        /// <param name="filterCount">Number of output filters.</param>
        /// <param name="kernelHeight">Kernel height.</param>
        /// <param name="kernelWidth">Kernel width.</param>
        /// <param name="padding">Zero padding added on each side.</param>
        public ConvolutionLayer(int depth, int height, int width, int filterCount, int kernelHeight, int kernelWidth, int padding)
        {
            var random = new RandomInitializer();

            this.depth        = depth;
            this.height       = height;
            this.width        = width;
            this.filterCount  = filterCount;
            this.kernelHeight = kernelHeight;
            this.kernelWidth  = kernelWidth;
            this.padding      = padding;
            // "Valid" convolution output size, enlarged by the padding.
            this.outputHeight = height - kernelHeight + 1 + (2 * padding);
            this.outputWidth  = width - kernelWidth + 1 + (2 * padding);
            this.bias         = new float[filterCount];
            // Filter tensor [filter, channel, kernelRow, kernelCol], randomly initialised.
            this.filter       = new float[filterCount, depth, kernelHeight, kernelWidth];
            for (var i = 0; i < filterCount; ++i)
            {
                for (var j = 0; j < depth; ++j)
                {
                    for (var k = 0; k < kernelHeight; ++k)
                    {
                        for (var l = 0; l < kernelWidth; ++l)
                        {
                            this.filter[i, j, k, l] = random.NextFloat();
                        }
                    }
                }
            }

            // Scratch buffers for the forward pass and the backward gradients.
            this.filterFlipped        = new float[depth, filterCount, kernelHeight, kernelWidth];
            this.inputWithPadding     = new float[depth, height + (2 * padding), width + (2 * padding)];
            this.output               = new float[filterCount, this.outputHeight, this.outputWidth];
            this.gradientCostOverBias = new float[filterCount];
            this.gradientCostOverOutputWithPadding = new float[filterCount, height + kernelHeight - 1, width + kernelWidth - 1];
            this.gradientCostOverWeights           = new float[filterCount, depth, kernelHeight, kernelWidth];
            this.gradientCostOverWeightsTemporary  = new float[filterCount, depth, kernelHeight, kernelWidth];
            this.gradientCostOverInput             = new float[depth, height, width];

            // Convolution engines: forward, filter-gradient, and input-gradient passes.
            this.feedForwardConvolution    = new MultiChannelConvolution(depth, height + (2 * padding), width + (2 * padding), filterCount, kernelHeight, kernelWidth);
            this.filterGradientConvolution = new MultipleMonoChannelConvolution(depth, filterCount, height + (2 * padding), width + (2 * padding), this.outputHeight, this.outputWidth);
            this.inputGradientConvolution  = new MultiChannelConvolution(filterCount, height + kernelHeight - 1, width + kernelWidth - 1, depth, kernelHeight, kernelWidth);
        }
Пример #21
0
        /// <summary>
        /// Receive an image from camera, run the detector on it, and draw a
        /// green rectangle around every detection above the confidence threshold.
        /// </summary>
        /// <param name="sender">Event source (unused).</param>
        /// <param name="e">Event arguments (unused).</param>
        private void Camera_ImageGrabbed(object sender, EventArgs e)
        {
            camera.Retrieve(frame);

            //CvInvoke.Flip(frame, frame, Emgu.CV.CvEnum.FlipType.Horizontal);
            Mat blobs = DnnInvoke.BlobFromImage(frame, 1.0, new System.Drawing.Size(detectionSize, detectionSize));

            net.SetInput(blobs);
            Mat detections = net.Forward();

            // The `as` cast yields null when the output is not a 4-D float blob;
            // guard instead of throwing inside the event handler.
            float[,,,] detectionsArrayInFloats = detections.GetData() as float[, , , ];
            if (detectionsArrayInFloats != null)
            {
                for (int i = 0; i < detectionsArrayInFloats.GetLength(2); i++)
                {
                    // FIX: the values are already floats — the old
                    // Convert.ToSingle(float, culture) calls were no-op
                    // round-trips and have been removed.
                    if (detectionsArrayInFloats[0, 0, i, 2] > 0.4)
                    {
                        // Coordinates are normalised [0..1]; scale to display pixels.
                        float Xstart = detectionsArrayInFloats[0, 0, i, 3] * detectionSize * xRate;
                        float Ystart = detectionsArrayInFloats[0, 0, i, 4] * detectionSize * yRate;
                        float Xend   = detectionsArrayInFloats[0, 0, i, 5] * detectionSize * xRate;
                        float Yend   = detectionsArrayInFloats[0, 0, i, 6] * detectionSize * yRate;

                        System.Drawing.Rectangle rect = new System.Drawing.Rectangle
                        {
                            X      = (int)Xstart,
                            Y      = (int)Ystart,
                            Height = (int)(Yend - Ystart),
                            Width  = (int)(Xend - Xstart)
                        };

                        frame.Draw(rect, new Bgr(0, 255, 0), 2);
                    }
                }
            }

            // Marshal the UI update back onto the dispatcher thread.
            Dispatcher.Invoke(new Action(() =>
            {
                img.Source = frame.Bitmap.BitmapToBitmapSource();
            }));
        }
Пример #22
0
        /// <summary>
        /// Inclusion–exclusion volume of <paramref name="cube"/> over the 4-D
        /// cumulative moment table: the alternating 8-corner sum at the
        /// blue-max plane minus the same sum at the blue-min plane.
        /// Term order matches the original exactly to keep float rounding identical.
        /// </summary>
        /// <param name="cube">The box whose corner indices select the moments.</param>
        /// <param name="moment">Cumulative moment table [alpha, red, green, blue].</param>
        /// <returns>The box volume over the moment table.</returns>
        private static float VolumeFloat(Box cube, float[,,,] moment)
        {
            // Alternating (alpha, red, green) corner sum in the blue-max plane.
            float blueMaxPlane =
                moment[cube.AlphaMaximum, cube.RedMaximum, cube.GreenMaximum, cube.BlueMaximum] -
                moment[cube.AlphaMaximum, cube.RedMaximum, cube.GreenMinimum, cube.BlueMaximum] -
                moment[cube.AlphaMaximum, cube.RedMinimum, cube.GreenMaximum, cube.BlueMaximum] +
                moment[cube.AlphaMaximum, cube.RedMinimum, cube.GreenMinimum, cube.BlueMaximum] -
                moment[cube.AlphaMinimum, cube.RedMaximum, cube.GreenMaximum, cube.BlueMaximum] +
                moment[cube.AlphaMinimum, cube.RedMaximum, cube.GreenMinimum, cube.BlueMaximum] +
                moment[cube.AlphaMinimum, cube.RedMinimum, cube.GreenMaximum, cube.BlueMaximum] -
                moment[cube.AlphaMinimum, cube.RedMinimum, cube.GreenMinimum, cube.BlueMaximum];

            // Same alternating sum in the blue-min plane (original term order).
            float blueMinPlane =
                moment[cube.AlphaMaximum, cube.RedMaximum, cube.GreenMaximum, cube.BlueMinimum] -
                moment[cube.AlphaMinimum, cube.RedMaximum, cube.GreenMaximum, cube.BlueMinimum] -
                moment[cube.AlphaMaximum, cube.RedMaximum, cube.GreenMinimum, cube.BlueMinimum] +
                moment[cube.AlphaMinimum, cube.RedMaximum, cube.GreenMinimum, cube.BlueMinimum] -
                moment[cube.AlphaMaximum, cube.RedMinimum, cube.GreenMaximum, cube.BlueMinimum] +
                moment[cube.AlphaMinimum, cube.RedMinimum, cube.GreenMaximum, cube.BlueMinimum] +
                moment[cube.AlphaMaximum, cube.RedMinimum, cube.GreenMinimum, cube.BlueMinimum] -
                moment[cube.AlphaMinimum, cube.RedMinimum, cube.GreenMinimum, cube.BlueMinimum];

            return blueMaxPlane - blueMinPlane;
        }
Пример #23
0
    /// <summary>
    /// Mean of the first batch element of a rank-4 tensor: averages
    /// tensor[0, i, j, k] over the last three dimensions.
    /// </summary>
    /// <param name="tensor">Input tensor; only index 0 of dimension 0 is read.</param>
    /// <returns>The arithmetic mean as a double.</returns>
    public static double mean(float[,,,] tensor)
    {
        int dim1 = tensor.GetLength(1);
        int dim2 = tensor.GetLength(2);
        int dim3 = tensor.GetLength(3);

        double total = 0.0;
        for (int a = 0; a < dim1; a++)
        {
            for (int b = 0; b < dim2; b++)
            {
                for (int c = 0; c < dim3; c++)
                {
                    total += tensor[0, a, b, c];
                }
            }
        }

        return total / (dim1 * dim2 * dim3);
    }
    /// <summary>
    /// Collects every heatmap cell whose score passes the threshold and is the
    /// maximum within its local window, pushing each onto a priority queue
    /// keyed by score.
    /// </summary>
    /// <param name="scoreThreshold">Minimum score for a root candidate.</param>
    /// <param name="localMaximumRadius">Radius of the local-maximum window.</param>
    /// <param name="scores">Score tensor [1, y, x, keypoint].</param>
    /// <returns>Priority queue of candidate root parts.</returns>
    PriorityQueue <float, PartWithScore> BuildPartWithScoreQueue(
        float scoreThreshold, int localMaximumRadius,
        float[,,,] scores)
    {
        var queue = new PriorityQueue <float, PartWithScore>();

        var rows          = scores.GetLength(1);
        var cols          = scores.GetLength(2);
        var keypointCount = scores.GetLength(3);

        for (int heatmapY = 0; heatmapY < rows; ++heatmapY)
        {
            for (int heatmapX = 0; heatmapX < cols; ++heatmapX)
            {
                for (int keypointId = 0; keypointId < keypointCount; ++keypointId)
                {
                    float score = scores[0, heatmapY, heatmapX, keypointId];

                    // Candidates must pass the threshold AND be the local
                    // maximum; short-circuiting keeps the (expensive) window
                    // check skipped for sub-threshold scores, as before.
                    if (!(score < scoreThreshold) &&
                        ScoreIsMaximumInLocalWindow(
                            keypointId, score, heatmapY, heatmapX, localMaximumRadius,
                            scores))
                    {
                        queue.Push(score, new PartWithScore(score,
                                                            new Part(heatmapX, heatmapY, keypointId)
                                                            ));
                    }
                }
            }
        }

        return queue;
    }
Пример #25
0
    // Unity lifecycle hook: resets the genetic-algorithm population state,
    // loads (or randomizes) the saved networks, then copies the current
    // candidate's weights into the car's AI controller.
    void Start()
    {
        rb            = car.GetComponent <Rigidbody2D>();
        col           = false;
        cur           = 1;
        max1          = 0f;
        max2          = 0f;
        generation    = 1;
        // Population of 13 candidates; weight tensors are [candidate, layer, from, to].
        alive         = new int[13];
        fit           = new float[13];
        a             = new float[13, 4, 5, 5];
        rez           = new float[13, 4, 5, 5];
        probabilities = new float[13];
        ales          = new int[13];

        script = car.GetComponent <AIcontroller>();

        string path = Application.persistentDataPath + "/Text.txt";

        // Save-file convention: the first character of the second line selects
        // fresh random weights ('1') versus loading the previous save.
        string[] savefile = File.ReadAllLines(path);
        if (savefile[1][0] - '0' == 1)
        {
            random();
        }
        else
        {
            load();
        }

        // Copy candidate `cur`'s weights into the controller. Indices start at
        // 1 — index 0 appears unused by this copy; TODO confirm that convention.
        for (int l = 1; l <= 3; l++)
        {
            for (int i = 1; i <= 4; i++)
            {
                for (int j = 1; j <= 4; j++)
                {
                    script.a[l, i, j] = a[cur, l, i, j];
                }
            }
        }
    }
Пример #26
0
        /// <summary>
        /// Performs the convolution by unfolding the input and filter into
        /// matrices, multiplying them, and folding the product back into the
        /// output volume (im2col-style convolution).
        /// </summary>
        /// <param name="input">
        /// The input matrix of shape [inputDepth, inputHeight, inputWidth].
        /// </param>
        /// <param name="filter">
        /// The filter matrix of shape [filterCount, inputDepth, kernelHeight, kernelWidth].
        /// </param>
        /// <param name="output">
        /// The output matrix of shape [filterCount, outputHeight, outputWidth].
        /// </param>
        /// <exception cref="ArgumentException">
        /// Thrown when the size of matrices is incorrect.
        /// </exception>
        public void Convolve(float[,,] input, float[,,,] filter, float[,,] output)
        {
            if (input.GetLength(0) != this.inputDepth || input.GetLength(1) != this.inputHeight || input.GetLength(2) != this.inputWidth)
            {
                throw new ArgumentException("Wrong input size.", nameof(input));
            }

            // BUGFIX: the two messages below previously said "Wrong input size.",
            // copied from the first check, misreporting which argument was bad.
            if (filter.GetLength(0) != this.filterCount || filter.GetLength(1) != this.inputDepth || filter.GetLength(2) != this.kernelHeight || filter.GetLength(3) != this.kernelWidth)
            {
                throw new ArgumentException("Wrong filter size.", nameof(filter));
            }

            if (output.GetLength(0) != this.filterCount || output.GetLength(1) != this.outputHeight || output.GetLength(2) != this.outputWidth)
            {
                throw new ArgumentException("Wrong output size.", nameof(output));
            }

            this.UnfoldConvolutionInput(input);
            this.UnfoldConvolutionFilter(filter);
            MatrixHelper.Multiply(this.inputUnfolded, this.filterUnfolded, this.outputUnfolded);
            this.FoldConvolutionOutput(output);
        }
Пример #27
0
    /// <summary>
    /// Decodes up to <paramref name="maxPoseDetections"/> poses from the network
    /// heatmaps, greedily growing each pose from the highest-scoring root part
    /// and rejecting roots that fall inside the NMS radius of an existing pose.
    /// </summary>
    public Pose[] DecodeMultiplePoses(
        float[,,,] scores, float[,,,] offsets,
        float[,,,] displacementsFwd, float[,,,] displacementBwd,
        int outputStride, int maxPoseDetections,
        float scoreThreshold, int nmsRadius = 20)
    {
        var detectedPoses = new List <Pose>();
        float squaredNmsRadius = (float)nmsRadius * nmsRadius;

        // All local-maximum keypoints above threshold, best score first.
        PriorityQueue <float, PartWithScore> candidates = BuildPartWithScoreQueue(
            scoreThreshold, kLocalMaximumRadius, scores);

        while (detectedPoses.Count < maxPoseDetections && candidates.Count > 0)
        {
            var rootCandidate = candidates.Pop().Value;

            var rootCoords = GetImageCoords(rootCandidate.part, outputStride, offsets);

            // Part-based non-maximum suppression: reject the candidate when it
            // sits within `nmsRadius` pixels of the same part of a prior pose.
            bool suppressed = WithinNmsRadiusOfCorrespondingPoint(
                detectedPoses, squaredNmsRadius, rootCoords, rootCandidate.part.id);

            if (suppressed)
            {
                continue;
            }

            // Grow a full pose outward from this root keypoint.
            var keypoints = DecodePose(
                rootCandidate, scores, offsets, outputStride, displacementsFwd,
                displacementBwd);

            float instanceScore = GetInstanceScore(detectedPoses, squaredNmsRadius, keypoints);
            detectedPoses.Add(new Pose(keypoints, instanceScore));
        }

        return detectedPoses.ToArray();
    }
Пример #28
0
        /// <summary>
        /// Asserts that a 4-D ndarray element-wise equals the expected managed
        /// array (shape, dtype, and values within 1e-8), with NaN == NaN.
        /// </summary>
        /// <param name="arrayData">The ndarray under test.</param>
        /// <param name="expectedData">The expected values, same 4-D shape.</param>
        internal void AssertArray(ndarray arrayData, float[,,,] expectedData)
        {
            int lengthd0 = expectedData.GetLength(0);
            int lengthd1 = expectedData.GetLength(1);
            int lengthd2 = expectedData.GetLength(2);
            int lengthd3 = expectedData.GetLength(3);

            AssertShape(arrayData, lengthd0, lengthd1, lengthd2, lengthd3);
            AssertDataTypes(arrayData, expectedData);

            for (int i = 0; i < lengthd0; i++)
            {
                ndarray dim1Data = arrayData[i] as ndarray;
                for (int j = 0; j < lengthd1; j++)
                {
                    ndarray dim2Data = dim1Data[j] as ndarray;
                    for (int k = 0; k < lengthd2; k++)
                    {
                        ndarray dim3Data = dim2Data[k] as ndarray;
                        for (int l = 0; l < lengthd3; l++)
                        {
                            float E1 = expectedData[i, j, k, l];
                            float A1 = (float)dim3Data[l];

                            // NaN never compares equal; treat NaN == NaN as a match.
                            if (float.IsNaN(E1) && float.IsNaN(A1))
                            {
                                continue;
                            }

                            // BUGFIX: delta-based AreEqual cannot handle infinities
                            // (inf - inf is NaN), but the old "both infinite" skip
                            // silently accepted +inf against -inf. Require exact
                            // equality whenever either side is infinite.
                            if (float.IsInfinity(E1) || float.IsInfinity(A1))
                            {
                                Assert.AreEqual(E1, A1);
                                continue;
                            }

                            Assert.AreEqual(E1, A1, 0.00000001);
                        }
                    }
                }
            }
        }
Пример #29
0
        /// <summary>
        /// Allocates all buffers for a single-conv-layer network: one convolution
        /// (numFilters kernels of filterSize x filterSize x imageDepth), a 2x2
        /// pooling stage, and a small fully-connected head, plus the matching
        /// delta buffers used during backpropagation.
        /// NOTE(review): imageSize, imageDepth, filterSize and numFilters are
        /// fields/constants declared elsewhere in this class — confirm they are
        /// set before construction.
        /// </summary>
        public ConvNet()
        {
            // Forward-pass buffers.
            inpImage    = new float[imageSize, imageSize, imageDepth];
            poolingSize = imageSize / 2;   // 2x2 pooling halves each spatial dim

            filter           = new float[filterSize, filterSize, imageDepth, numFilters];
            filterBias       = new float[numFilters];
            filterBiasDeltas = new float[numFilters];
            featureMap       = new float[imageSize, imageSize, numFilters];
            pooling          = new float[poolingSize, poolingSize, numFilters];
            poolMask         = new float[imageSize, imageSize, numFilters];
            relUMask         = new float[imageSize, imageSize, numFilters];
            target           = new float[poolingSize, poolingSize, numFilters];
            maxPoolInd       = new int[poolingSize, poolingSize, numFilters];

            // Kernel half-width; assumes filterSize is odd.
            filterRadSize = (filterSize - 1) / 2;

            // Backward-pass (gradient) buffers mirroring the forward shapes.
            maxPoolDeltas    = new float[poolingSize, poolingSize, numFilters];
            featureMapDeltas = new float[imageSize, imageSize, numFilters];
            filterDeltas     = new float[filterSize, filterSize, imageDepth, numFilters];

            weights      = new float[poolingSize, poolingSize, numFilters];
            weightDeltas = new float[poolingSize, poolingSize, numFilters];
            singleTarget = 1f;

            // Fully-connected output head: one node per filter.
            node             = new float[numFilters];
            nodeWeights      = new float[poolingSize, poolingSize, numFilters];
            nodeWeightDeltas = new float[poolingSize, poolingSize, numFilters];
            nodeBias         = new float[numFilters];
            nodeBiasDeltas   = new float[numFilters];
            nodeId           = new int[numFilters];
            nodeTargets      = new float[numFilters];

            // Debugging
            debugFeatures = new float[imageSize, imageSize, numFilters];

            learningRate = 0.005f;
        }
Пример #30
0
        /// <summary>
        /// Runs the detection graph on one image tensor and converts the raw
        /// output tensors into per-detection results (class, label, score,
        /// normalized box region, and instance mask when available).
        /// </summary>
        /// <param name="image">Input tensor fed to the graph's "image_tensor" node.</param>
        /// <returns>One <see cref="RecognitionResult"/> per reported detection.</returns>
        public RecognitionResult[] Recognize(Tensor image)
        {
            Output input = _graph["image_tensor"];

            Output[] outputs = new Output[] { _graph["detection_boxes"], _graph["detection_scores"], _graph["detection_classes"], _graph["num_detections"], _graph["detection_masks"] };

            Tensor[] finalTensor = _session.Run(new Output[] { input }, new Tensor[] { image }, outputs);

            // "num_detections" is reported as a single-element float tensor.
            int numDetections = (int)(finalTensor[3].Data as float[])[0];

            float[,,] detectionBoxes   = finalTensor[0].JaggedData as float[, , ];
            float[,] detectionScores   = finalTensor[1].JaggedData as float[, ];
            float[,] detectionClasses  = finalTensor[2].JaggedData as float[, ];
            float[,,,] detectionMasks  = finalTensor[4].JaggedData as float[, , , ];
            List <RecognitionResult> results = new List <RecognitionResult>();

            for (int i = 0; i < numDetections; i++)
            {
                RecognitionResult r = new RecognitionResult();
                r.Class       = (int)detectionClasses[0, i];
                // Class ids are 1-based in the label map; Labels is 0-based.
                r.Label       = Labels[r.Class - 1];
                r.Probability = detectionScores[0, i];
                // Box coordinates are normalized; presumably ordered
                // [yMin, xMin, yMax, xMax] per the TF object-detection
                // convention — TODO confirm against the model.
                r.Region      = new float[] { detectionBoxes[0, i, 0], detectionBoxes[0, i, 1], detectionBoxes[0, i, 2], detectionBoxes[0, i, 3] };
                results.Add(r);

                // Guard the mask copy: the `as` cast above yields null when the
                // model produces no masks (or a different rank), which previously
                // caused a NullReferenceException here. Mask stays null instead.
                if (detectionMasks != null)
                {
                    float[,] m = new float[detectionMasks.GetLength(2), detectionMasks.GetLength(3)];
                    for (int j = 0; j < m.GetLength(0); j++)
                    {
                        for (int k = 0; k < m.GetLength(1); k++)
                        {
                            m[j, k] = detectionMasks[0, i, j, k];
                        }
                    }
                    r.Mask = m;
                }
            }
            return(results.ToArray());
        }