Code Example #1
    /// <summary>
    /// Get a new keypoint along the provided edgeId for the pose instance.
    /// </summary>
    /// <param name="edgeId"></param>
    /// <param name="sourceKeypoint"></param>
    /// <param name="targetKeypointId"></param>
    /// <param name="scores"></param>
    /// <param name="offsets"></param>
    /// <param name="stride"></param>
    /// <param name="displacements"></param>
    /// <returns>A new keypoint with the displaced coordinates</returns>
    static Keypoint TraverseToTargetKeypoint(
        int edgeId, Keypoint sourceKeypoint, int targetKeypointId,
        Tensor scores, Tensor offsets, int stride,
        Tensor displacements)
    {
        // Get heatmap dimensions
        int height = scores.height;
        int width  = scores.width;

        // Get nearest heatmap indices for source keypoint
        Vector2Int sourceKeypointIndices = GetStridedIndexNearPoint(
            sourceKeypoint.position, stride, height, width);
        // Retrieve the displacement values for the current indices
        Vector2 displacement = GetDisplacement(edgeId, sourceKeypointIndices, displacements);
        // Add the displacement values to the keypoint position
        Vector2 displacedPoint = sourceKeypoint.position + displacement;
        // Get nearest heatmap indices for displaced keypoint
        Vector2Int displacedPointIndices =
            GetStridedIndexNearPoint(displacedPoint, stride, height, width);
        // Get the offset vector for the displaced keypoint indices
        Vector2 offsetVector = GetOffsetVector(
            displacedPointIndices.y, displacedPointIndices.x, targetKeypointId,
            offsets);
        // Get the heatmap value at the displaced keypoint location
        float score = scores[0, displacedPointIndices.y, displacedPointIndices.x, targetKeypointId];
        // Calculate the position for the displaced keypoint
        Vector2 targetKeypoint = (displacedPointIndices * stride) + offsetVector;

        return(new Keypoint(score, targetKeypoint, targetKeypointId));
    }
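The displacement-following arithmetic above can be reproduced without the Barracuda Tensor type. Below is a minimal, self-contained console sketch with made-up numbers; it assumes the strided-index helper rounds the position to the nearest heatmap cell and clamps it to the heatmap bounds, which matches how it is used here but is not guaranteed to match the original helper.

    // Minimal sketch of the displacement-following step, using System.Numerics
    // instead of UnityEngine/Barracuda. All numbers are illustrative only.
    using System;
    using System.Numerics;

    static class DisplacementSketch
    {
        // Assumed behaviour of GetStridedIndexNearPoint: round to the nearest
        // heatmap cell and clamp to the heatmap bounds.
        static (int y, int x) NearestCell(Vector2 position, int stride, int height, int width)
        {
            int x = Math.Clamp((int)Math.Round(position.X / stride), 0, width - 1);
            int y = Math.Clamp((int)Math.Round(position.Y / stride), 0, height - 1);
            return (y, x);
        }

        static void Main()
        {
            int stride = 16, height = 9, width = 9;
            Vector2 sourcePos    = new Vector2(40f, 56f);     // source keypoint in image space
            Vector2 displacement = new Vector2(30f, -12f);    // value read from the displacement tensor
            Vector2 offset       = new Vector2(2.5f, -1.25f); // value read from the offsets tensor

            // 1. Follow the displacement vector from the source keypoint.
            Vector2 displaced = sourcePos + displacement;
            // 2. Snap the displaced point back onto the heatmap grid.
            var (y, x) = NearestCell(displaced, stride, height, width);
            // 3. Rescale to image space and refine with the offset vector.
            Vector2 target = new Vector2(x * stride, y * stride) + offset;

            Console.WriteLine($"cell=({y},{x}) target={target}"); // cell=(3,4) target=<66.5, 46.75>
        }
    }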
Code Example #2
File: KeypointXML.cs  Project: msdgwzhy6/library
    public KeypointN(Keypoint kp)
    {
        if (kp.HasFV != true)
        {
            throw (new ArgumentException("While trying to generate integer " +
                                         "vector: source keypoint has no feature vector yet"));
        }

        x           = kp.X;
        y           = kp.Y;
        scale       = kp.Scale;
        orientation = kp.Orientation;

        dim        = kp.FVLinearDim;
        descriptor = new int[kp.FVLinearDim];

        for (int d = 0; d < kp.FVLinearDim; ++d)
        {
            descriptor[d] = (int)(255.0 * kp.FVLinearGet(d));
            if (descriptor[d] < 0 || descriptor[d] > 255)
            {
                throw (new ArgumentOutOfRangeException
                           ("Resulting integer descriptor k is not 0 <= k <= 255"));
            }
        }
    }
Code Example #3
 public NumberSequence(params double[] values)
 {
     _keypoints = new Keypoint[values.Length];
     for (var i = 0; i < values.Length; i++)
     {
         _keypoints[i] = new Keypoint((float)i / values.Length, values[i]);
     }
 }
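A quick usage note, not from the source: because each time is computed as i / values.Length, n values produce keypoint times 0, 1/n, ..., (n-1)/n, so the final keypoint never lands exactly on time 1.0.

 // Hypothetical usage: three values yield keypoints at times 0, 1/3 and 2/3.
 var sequence = new NumberSequence(10.0, 20.0, 30.0);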
Code Example #4
    Vector3 WorldPointForPart(FritzPose pose, FritzPoseParts posePart)
    {
        Keypoint keypoint = pose.keypoints[(int)posePart];
        var      x        = keypoint.position.x;
        var      y        = 1.0f - keypoint.position.y;
        var      position = new Vector3(x, y, 1f);

        return(m_Cam.ViewportToWorldPoint(position));
    }
Code Example #5
        /// <summary>
        /// Gets Vector3 translated to openviii coordinates from given track animation's frame id
        /// </summary>
        /// <param name="trackId"></param>
        /// <param name="frameId"></param>
        /// <returns></returns>
        public Vector3 GetTrackFrameVector(int trackId, int frameId)
        {
            Keypoint kp = railEntries[trackId].keypoints[frameId];

            return(new Vector3(Extended.ConvertVanillaWorldXAxisToOpenVIII(kp.x),
                               Extended.ConvertVanillaWorldYAxisToOpenVIII(kp.y),
                               Extended.ConvertVanillaWorldZAxisToOpenVIII(kp.Z)
                               ));
        }
Code Example #6
    /// <summary>
    /// Calculate the position of the provided keypoint in the input image
    /// </summary>
    /// <param name="part"></param>
    /// <param name="stride"></param>
    /// <param name="offsets"></param>
    /// <returns></returns>
    public static Vector2 GetImageCoords(Keypoint part, int stride, Tensor offsets)
    {
        // The accompanying offset vector for the current coords
        Vector2 offsetVector = GetOffsetVector((int)part.position.y, (int)part.position.x,
                                               part.id, offsets);

        // Scale the coordinates up to the input image resolution
        // Add the offset vectors to refine the key point location
        return((part.position * stride) + offsetVector);
    }
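As a quick worked check of the formula with made-up numbers: a part at heatmap cell (3, 5) with stride 16 and an offset vector of (2.5, -1.0) maps to (3 * 16 + 2.5, 5 * 16 - 1.0) = (50.5, 79.0). The fragment below reproduces that arithmetic with System.Numerics rather than UnityEngine.

    // Worked check of (position * stride) + offsetVector with made-up numbers
    // (C# top-level statements).
    using System.Numerics;

    Vector2 cell   = new Vector2(3f, 5f);    // heatmap coordinates stored in part.position
    Vector2 offset = new Vector2(2.5f, -1f); // offset vector read for that cell
    int stride     = 16;
    Vector2 image  = cell * stride + offset;
    System.Console.WriteLine(image);         // <50.5, 79>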
Code Example #7
        public PartVector(Keypoint keypoint)
        {
            float multiplier = Screen.width / poseCanvasWidth;

            score = keypoint.score;

            Vector3 originPose = new Vector3(Screen.width, poseCanvasHeight * multiplier, 0);
            Vector3 partPose   = new Vector3(keypoint.x * multiplier, keypoint.y * multiplier, 0);

            position = originPose - partPose;
        }
Code Example #8
        /// <summary/>
        public void Load(BinaryReader reader)
        {
            var keyCount = reader.ReadInt32();

            _keypoints = new Keypoint[keyCount];
            for (var i = 0; i < keyCount; i++)
            {
                var time  = reader.ReadSingle();
                var value = reader.ReadDouble();
                _keypoints[i] = new Keypoint(time, value);
            }
        }
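For symmetry with Load, here is a hypothetical Save counterpart that writes the same layout (an Int32 count followed by a Single time and a Double value per keypoint). It is a sketch meant to sit in the same class; the Time and Value property names are assumptions, not taken from the original source.

        /// <summary>Hypothetical counterpart to Load; not part of the original class.</summary>
        public void Save(BinaryReader ignored) { } // placeholder removed below

        public void Save(BinaryWriter writer)
        {
            // Assumes Keypoint exposes the float time and double value it was
            // constructed with as Time and Value (an assumption).
            writer.Write(_keypoints.Length);     // Int32 key count
            foreach (var keypoint in _keypoints)
            {
                writer.Write(keypoint.Time);     // Single time
                writer.Write(keypoint.Value);    // Double value
            }
        }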
Code Example #9

    Keypoint[] DecodePose(PartWithScore root, float[,,,] scores, float[,,,] offsets,
                          int outputStride, float[,,,] displacementsFwd,
                          float[,,,] displacementsBwd)
    {
        var numParts = scores.GetLength(3);
        var numEdges = parentToChildEdges.Length;

        var instanceKeypoints = new Keypoint[numParts];

        // Start a new detection instance at the position of the root.
        var rootPart  = root.part;
        var rootScore = root.score;
        var rootPoint = GetImageCoords(rootPart, outputStride, offsets);

        instanceKeypoints[rootPart.id] = new Keypoint(
            rootScore,
            rootPoint,
            partNames[rootPart.id]
            );

        // Decode the part positions upwards in the tree, following the backward
        // displacements.
        for (var edge = numEdges - 1; edge >= 0; --edge)
        {
            var sourceKeypointId = parentToChildEdges[edge];
            var targetKeypointId = childToParentEdges[edge];
            if (instanceKeypoints[sourceKeypointId].score > 0.0f &&
                instanceKeypoints[targetKeypointId].score == 0.0f)
            {
                instanceKeypoints[targetKeypointId] = TraverseToTargetKeypoint(
                    edge, instanceKeypoints[sourceKeypointId], targetKeypointId, scores,
                    offsets, outputStride, displacementsBwd);
            }
        }

        // Decode the part positions downwards in the tree, following the forward
        // displacements.
        for (var edge = 0; edge < numEdges; ++edge)
        {
            var sourceKeypointId = childToParentEdges[edge];
            var targetKeypointId = parentToChildEdges[edge];
            if (instanceKeypoints[sourceKeypointId].score > 0.0f &&
                instanceKeypoints[targetKeypointId].score == 0.0f)
            {
                instanceKeypoints[targetKeypointId] = TraverseToTargetKeypoint(
                    edge, instanceKeypoints[sourceKeypointId], targetKeypointId, scores,
                    offsets, outputStride, displacementsFwd);
            }
        }

        return(instanceKeypoints);
    }
Code Example #10
        // Record the extracted feature points and their count
        private void SetFeature(LoweFeatureDetector featureDetector)
        {
            NumOfFeature = featureDetector.GlobalKeypoints.Count;
            features     = new Feature[NumOfFeature];
            for (int i = 0; i < NumOfFeature; i++)
            {
                Keypoint keypoint = (Keypoint)featureDetector.GlobalKeypoints[i];

                features[i] = new Feature(keypoint.Image, keypoint.X, keypoint.Y, keypoint.ImgScale, keypoint.Scale, keypoint.Orientation);
                features[i].CreateVector(4, 4, 8);
                features[i].HasFeatureVector = keypoint.HasFV;
                features[i].FeatureVector    = keypoint.FV;
            }
        }
Code Example #11
File: FilterBank.cs  Project: qianc123/visionnet
        /// <summary>
        /// Computes the descriptor as an array of filter responses.
        /// </summary>
        /// <param name="samples">Samples to compute the filter bank response for</param>
        /// <param name="pyramid">Pyramid to use when computing responses</param>
        /// <returns>Filter bank descriptor</returns>
        public virtual List <Keypoint> Compute <T>(List <ScaleSpaceSample> samples, ScaleSpacePyramid <T> pyramid) where T : IMultichannelImage <float>, new()
        {
            List <Keypoint> points = new List <Keypoint>();

            foreach (ScaleSpaceSample sample in samples)
            {
                float[] desc = new float[_filters.Count];
                for (int i = 0; i < desc.Length; i++)
                {
                    desc[i] = _filters[i].Compute <T>(sample, pyramid);
                }
                Keypoint point = new Keypoint(sample.X, sample.Y, sample.ImageScale, pyramid.ComputeSigma(sample.Octave, sample.Level), 0);
                point.Descriptor = desc;
                points.Add(point);
            }
            return(points);
        }
Code Example #12
    /// <summary>
    /// Detects multiple poses and finds their parts from part scores and displacement vectors.
    /// </summary>
    /// <param name="heatmaps"></param>
    /// <param name="offsets"></param>
    /// <param name="displacementsFwd"></param>
    /// <param name="displacementBwd"></param>
    /// <param name="stride"></param>
    /// <param name="maxPoseDetections"></param>
    /// <param name="scoreThreshold"></param>
    /// <param name="nmsRadius"></param>
    /// <returns>An array of poses up to maxPoseDetections in size</returns>
    public static Keypoint[][] DecodeMultiplePoses(
        Tensor heatmaps, Tensor offsets,
        Tensor displacementsFwd, Tensor displacementBwd,
        int stride, int maxPoseDetections,
        float scoreThreshold = 0.5f, int nmsRadius = 20)
    {
        // Stores the final poses
        List <Keypoint[]> poses = new List <Keypoint[]>();
        // Square the NMS radius so the distance checks can avoid square roots
        float squaredNmsRadius = (float)nmsRadius * nmsRadius;

        // Get a list of the keypoints with the highest heatmap values within the provided radius.
        List <Keypoint> list = BuildPartList(scoreThreshold, kLocalMaximumRadius, heatmaps);

        // Order the list in descending order based on score
        list = list.OrderByDescending(x => x.score).ToList();

        // Decode poses until the max number of poses has been reached or the part list is empty
        while (poses.Count < maxPoseDetections && list.Count > 0)
        {
            // Get the part with the highest score in the list
            Keypoint root = list[0];
            // Remove the keypoint from the list
            list.RemoveAt(0);

            // Calculate the input image coordinates for the current part
            Vector2 rootImageCoords = GetImageCoords(root, stride, offsets);

            // Skip parts that are too close to existing poses
            if (WithinNmsRadiusOfCorrespondingPoint(
                    poses, squaredNmsRadius, rootImageCoords, root.id))
            {
                continue;
            }

            // Find the keypoints in the same pose as the root part
            Keypoint[] keypoints = DecodePose(
                root, heatmaps, offsets, stride, displacementsFwd,
                displacementBwd);

            // Add the keypoints for the decoded pose to the list
            poses.Add(keypoints);
        }

        return(poses.ToArray());
    }
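WithinNmsRadiusOfCorrespondingPoint is referenced above but not shown. Below is a minimal sketch of such a check, assuming the same Keypoint type as the surrounding examples (a UnityEngine Vector2 position plus an int id) and that each decoded pose array is indexed by keypoint id; the helper in the original source may be implemented differently.

    // Sketch of the suppression test: a candidate root is skipped when the same
    // body part in any already-decoded pose lies within nmsRadius of it.
    // Comparing squared distances avoids a square root per candidate.
    static bool WithinNmsRadiusOfCorrespondingPoint(
        List<Keypoint[]> poses, float squaredNmsRadius, Vector2 candidate, int keypointId)
    {
        foreach (Keypoint[] pose in poses)
        {
            Vector2 existing = pose[keypointId].position;
            if ((existing - candidate).sqrMagnitude <= squaredNmsRadius)
            {
                return true; // too close: would duplicate an existing pose
            }
        }
        return false;
    }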
Code Example #13
    /// <summary>
    /// Follows the displacement fields to decode the full pose of the object
    /// instance given the position of a part that acts as root.
    /// </summary>
    /// <param name="root"></param>
    /// <param name="scores"></param>
    /// <param name="offsets"></param>
    /// <param name="stride"></param>
    /// <param name="displacementsFwd"></param>
    /// <param name="displacementsBwd"></param>
    /// <returns>An array of keypoints for a single pose</returns>
    static Keypoint[] DecodePose(Keypoint root, Tensor scores, Tensor offsets,
                                 int stride, Tensor displacementsFwd, Tensor displacementsBwd)
    {
        Keypoint[] instanceKeypoints = new Keypoint[scores.channels];

        // Start a new detection instance at the position of the root.
        Vector2 rootPoint = GetImageCoords(root, stride, offsets);

        instanceKeypoints[root.id] = new Keypoint(root.score, rootPoint, root.id);

        int numEdges = parentChildrenTuples.Length;

        // Decode the part positions upwards in the tree, following the backward
        // displacements.
        for (int edge = numEdges - 1; edge >= 0; --edge)
        {
            int sourceKeypointId = parentChildrenTuples[edge].Item2;
            int targetKeypointId = parentChildrenTuples[edge].Item1;
            if (instanceKeypoints[sourceKeypointId].score > 0.0f &&
                instanceKeypoints[targetKeypointId].score == 0.0f)
            {
                instanceKeypoints[targetKeypointId] = TraverseToTargetKeypoint(
                    edge, instanceKeypoints[sourceKeypointId], targetKeypointId, scores,
                    offsets, stride, displacementsBwd);
            }
        }

        // Decode the part positions downwards in the tree, following the forward
        // displacements.
        for (int edge = 0; edge < numEdges; ++edge)
        {
            int sourceKeypointId = parentChildrenTuples[edge].Item1;
            int targetKeypointId = parentChildrenTuples[edge].Item2;
            if (instanceKeypoints[sourceKeypointId].score > 0.0f &&
                instanceKeypoints[targetKeypointId].score == 0.0f)
            {
                instanceKeypoints[targetKeypointId] = TraverseToTargetKeypoint(
                    edge, instanceKeypoints[sourceKeypointId], targetKeypointId, scores,
                    offsets, stride, displacementsFwd);
            }
        }

        return(instanceKeypoints);
    }
Code Example #14

        /// <summary>
        /// Draw the poses.
        /// </summary>
        /// <param name="image"></param>
        /// <param name="poses"></param>
        /// <param name="poseChain"></param>
        private void Drawing(Mat image, Pose[] poses, bool poseChain = false)
        {
            if (poses.Length > 0)
            {
                using (Graphics g = Graphics.FromImage(frame.Bitmap))
                {
                    for (int i = 0; i < poses.Length; i++)
                    {
                        Pose pose = poses[i];

                        if (pose.score > 0.15f)
                        {
                            for (int j = 0;
                                 j < (poseChain ? posenet.poseChain.GetLength(0) : jointPairs.GetLength(0));
                                 j++)
                            {
                                Keypoint point1 = pose.keypoints.FirstOrDefault(item => item.part.Equals(
                                                                                    poseChain ? posenet.poseChain[j].Item1 : jointPairs[j, 0]));
                                Keypoint point2 = pose.keypoints.FirstOrDefault(item => item.part.Equals(
                                                                                    poseChain ? posenet.poseChain[j].Item2 : jointPairs[j, 1]));

                                if (!point1.IsEmpty && point1.score >= 0.02)
                                {
                                    if (!point2.IsEmpty && point2.score >= 0.02)
                                    {
                                        g.DrawLine(skeletonColor, point1.position.X * xRate, point1.position.Y *
                                                   yRate, point2.position.X * xRate, point2.position.Y * yRate);
                                    }

                                    g.DrawEllipse(jointColor, point1.position.X * xRate, point1.position.Y * yRate, 3, 3);
                                }

                                if (!point2.IsEmpty && point2.score >= 0.02)
                                {
                                    g.DrawEllipse(jointColor, point2.position.X * xRate, point2.position.Y * yRate, 3, 3);
                                }
                            }
                        }
                    }
                }
            }
        }
Code Example #15
    private void CapAndNormalizeFV(Keypoint kp, double fvGradHicap)
    {
        double norm = 0.0;

        for (int n = 0; n < kp.FVLinearDim; ++n)
        {
            norm += Math.Pow(kp.FVLinearGet(n), 2.0);
        }

        norm = Math.Sqrt(norm);
        if (norm == 0.0)
        {
            throw (new InvalidOperationException
                       ("CapAndNormalizeFV cannot normalize with norm = 0.0"));
        }

        for (int n = 0; n < kp.FVLinearDim; ++n)
        {
            kp.FVLinearSet(n, kp.FVLinearGet(n) / norm);
        }

        for (int n = 0; n < kp.FVLinearDim; ++n)
        {
            if (kp.FVLinearGet(n) > fvGradHicap)
            {
                kp.FVLinearSet(n, fvGradHicap);
            }
        }

        norm = 0.0;
        for (int n = 0; n < kp.FVLinearDim; ++n)
        {
            norm += Math.Pow(kp.FVLinearGet(n), 2.0);
        }

        norm = Math.Sqrt(norm);

        for (int n = 0; n < kp.FVLinearDim; ++n)
        {
            kp.FVLinearSet(n, kp.FVLinearGet(n) / norm);
        }
    }
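The method above implements the normalize, cap, renormalize pattern used for SIFT-style descriptors. The self-contained sketch below shows the same three passes on a plain double[] with made-up values; it is an illustration, not the original Keypoint API.

    // Self-contained illustration of normalize -> cap -> renormalize on a plain
    // array. Values are made up; 0.2 is the classic SIFT gradient cap.
    using System;
    using System.Linq;

    static class CapAndNormalizeSketch
    {
        static void Normalize(double[] v)
        {
            double norm = Math.Sqrt(v.Sum(x => x * x));
            if (norm == 0.0)
                throw new InvalidOperationException("Cannot normalize a zero vector");
            for (int i = 0; i < v.Length; i++)
                v[i] /= norm;
        }

        static void Main()
        {
            double[] fv  = { 0.9, 0.1, 0.05, 0.02 }; // toy descriptor values
            double   cap = 0.2;

            Normalize(fv);                           // first pass: unit length
            for (int i = 0; i < fv.Length; i++)
                fv[i] = Math.Min(fv[i], cap);        // clamp overly large components
            Normalize(fv);                           // second pass: restore unit length

            Console.WriteLine(string.Join(", ", fv.Select(x => x.ToString("0.000"))));
        }
    }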
Code Example #16
    bool IsEyesAndNoseScoreGood(PoseVector poseVector)
    {
        bool isGoodScore = true;

        string[] faceKeypoints = { "nose", "leftEye", "rightEye" };
        foreach (FieldInfo field in poseVector.GetType().GetFields())
        {
            if (!(faceKeypoints.Contains(field.Name)))
            {
                continue;
            }

            Keypoint keypoint = (Keypoint)field.GetValue(poseVector);
            if (keypoint.score < accuracyThreshold)
            {
                isGoodScore = false;
                break;
            }
        }
        return(isGoodScore);
    }
Code Example #17
    /// <summary>
    /// Determine the estimated key point locations using the heatmaps and offsets tensors
    /// </summary>
    /// <param name="heatmaps">The heatmaps that indicate the confidence levels for key point locations</param>
    /// <param name="offsets">The offsets that refine the key point locations determined with the heatmaps</param>
    /// <returns>An array of keypoints for a single pose</returns>
    public static Keypoint[] DecodeSinglePose(Tensor heatmaps, Tensor offsets, int stride)
    {
        Keypoint[] keypoints = new Keypoint[heatmaps.channels];

        // Iterate through heatmaps
        for (int c = 0; c < heatmaps.channels; c++)
        {
            Keypoint part = new Keypoint();
            part.id = c;

            // Iterate through heatmap rows
            for (int y = 0; y < heatmaps.height; y++)
            {
                // Iterate through the columns of the current row
                for (int x = 0; x < heatmaps.width; x++)
                {
                    if (heatmaps[0, y, x, c] > part.score)
                    {
                        // Update the highest confidence for the current key point
                        part.score = heatmaps[0, y, x, c];

                        // Update the estimated key point coordinates
                        part.position.x = x;
                        part.position.y = y;
                    }
                }
            }

            // Calculate the position in the input image for the current (x, y) coordinates
            part.position = GetImageCoords(part, stride, offsets);

            // Add the current keypoint to the list
            keypoints[c] = part;
        }

        return(keypoints);
    }
Code Example #18

    /**
     * We get a new keypoint along the `edgeId` for the pose instance, assuming
     * that the position of the `idSource` part is already known. For this, we
     * follow the displacement vector from the source to target part (stored in
     * the `i`-th channel of the displacement tensor).
     */

    Keypoint TraverseToTargetKeypoint(
        int edgeId, Keypoint sourceKeypoint, int targetKeypointId,
        float[,,,] scores, float[,,,] offsets, int outputStride,
        float[,,,] displacements)
    {
        var height = scores.GetLength(1);
        var width  = scores.GetLength(2);

        // Nearest neighbor interpolation for the source->target displacements.
        var sourceKeypointIndices = GetStridedIndexNearPoint(
            sourceKeypoint.position, outputStride, height, width);

        var displacement =
            GetDisplacement(edgeId, sourceKeypointIndices, displacements);

        var displacedPoint = AddVectors(sourceKeypoint.position, displacement);

        var displacedPointIndices =
            GetStridedIndexNearPoint(displacedPoint, outputStride, height, width);

        var offsetPoint = GetOffsetPoint(
            displacedPointIndices.y, displacedPointIndices.x, targetKeypointId,
            offsets);

        var score = scores[0,
                           displacedPointIndices.y, displacedPointIndices.x, targetKeypointId];

        var targetKeypoint =
            AddVectors(
                new Vector2(
                    x: displacedPointIndices.x * outputStride,
                    y: displacedPointIndices.y * outputStride)
                , new Vector2(x: offsetPoint.X, y: offsetPoint.Y));

        return(new Keypoint(score, targetKeypoint, partNames[targetKeypointId]));
    }
Code Example #19
    private ArrayList GenerateKeypointSingle(double imgScale, ScalePoint point,
                                             int binCount, double peakRelThresh, int scaleCount,
                                             double octaveSigma)
    {
        double kpScale = octaveSigma *
                         Math.Pow(2.0, (point.Level + point.Local.ScaleAdjust) / scaleCount);

        double sigma    = 3.0 * kpScale;
        int    radius   = (int)(3.0 * sigma / 2.0 + 0.5);
        int    radiusSq = radius * radius;

        ImageMap magnitude = magnitudes[point.Level];
        ImageMap direction = directions[point.Level];

        int xMin = Math.Max(point.X - radius, 1);
        int xMax = Math.Min(point.X + radius, magnitude.XDim - 1);
        int yMin = Math.Max(point.Y - radius, 1);
        int yMax = Math.Min(point.Y + radius, magnitude.YDim - 1);

        double gaussianSigmaFactor = 2.0 * sigma * sigma;

        double[] bins = new double[binCount];

        for (int y = yMin; y < yMax; ++y)
        {
            for (int x = xMin; x < xMax; ++x)
            {
                int relX = x - point.X;
                int relY = y - point.Y;
                if (IsInCircle(relX, relY, radiusSq) == false)
                {
                    continue;
                }

                double gaussianWeight = Math.Exp
                                            (-((relX * relX + relY * relY) / gaussianSigmaFactor));

                int binIdx = FindClosestRotationBin(binCount, direction[x, y]);
                bins[binIdx] += magnitude[x, y] * gaussianWeight;
            }
        }


        AverageWeakBins(bins, binCount);

        double maxGrad = 0.0;
        int    maxBin  = 0;

        for (int b = 0; b < binCount; ++b)
        {
            if (bins[b] > maxGrad)
            {
                maxGrad = bins[b];
                maxBin  = b;
            }
        }

        double maxPeakValue, maxDegreeCorrection;

        InterpolateOrientation(bins[maxBin == 0 ? (binCount - 1) : (maxBin - 1)],
                               bins[maxBin], bins[(maxBin + 1) % binCount],
                               out maxDegreeCorrection, out maxPeakValue);

        bool[] binIsKeypoint = new bool[binCount];
        for (int b = 0; b < binCount; ++b)
        {
            binIsKeypoint[b] = false;

            if (b == maxBin)
            {
                binIsKeypoint[b] = true;
                continue;
            }

            if (bins[b] < (peakRelThresh * maxPeakValue))
            {
                continue;
            }

            int leftI  = (b == 0) ? (binCount - 1) : (b - 1);
            int rightI = (b + 1) % binCount;
            if (bins[b] <= bins[leftI] || bins[b] <= bins[rightI])
            {
                continue;
            }

            binIsKeypoint[b] = true;
        }

        ArrayList keypoints = new ArrayList();

        double oneBinRad = (2.0 * Math.PI) / binCount;

        for (int b = 0; b < binCount; ++b)
        {
            if (binIsKeypoint[b] == false)
            {
                continue;
            }

            int bLeft  = (b == 0) ? (binCount - 1) : (b - 1);
            int bRight = (b + 1) % binCount;

            double peakValue;
            double degreeCorrection;

            if (InterpolateOrientation(bins[bLeft], bins[b], bins[bRight],
                                       out degreeCorrection, out peakValue) == false)
            {
                throw (new InvalidOperationException("BUG: Parabola fitting broken"));
            }


            double degree = (b + degreeCorrection) * oneBinRad - Math.PI;

            if (degree < -Math.PI)
            {
                degree += 2.0 * Math.PI;
            }
            else if (degree > Math.PI)
            {
                degree -= 2.0 * Math.PI;
            }

            Keypoint kp = new Keypoint(imgScaled[point.Level],
                                       point.X + point.Local.FineX,
                                       point.Y + point.Local.FineY,
                                       imgScale, kpScale, degree);
            keypoints.Add(kp);
        }

        return(keypoints);
    }
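InterpolateOrientation is called above but not shown. One common formulation fits a parabola through the left, middle and right bins (taken at x = -1, 0, 1) and returns the sub-bin offset and height of its vertex; the sketch below assumes that formulation and is not necessarily the original implementation.

    // Assumed parabola-fitting helper: fit y = a*x^2 + b*x + c through the three
    // bins at x = -1, 0, 1 and return the vertex. Returns false when the bins
    // are collinear, matching the failure check in the caller above.
    static bool InterpolateOrientation(double left, double middle, double right,
                                       out double degreeCorrection, out double peakValue)
    {
        double a = (left + right) / 2.0 - middle;  // curvature
        double b = (right - left) / 2.0;           // slope
        degreeCorrection = peakValue = double.NaN;

        if (a == 0.0)
            return false;

        degreeCorrection = -b / (2.0 * a);         // sub-bin offset of the peak
        peakValue = middle - (b * b) / (4.0 * a);  // interpolated peak height
        return true;
    }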