/// <summary>
/// Builds the affine transform that maps eye positions in the original
/// rectangle onto the desired eye positions in the target rectangle.
/// The source eye matrix carries a homogeneous third column of ones so the
/// least-squares solve can recover the translation component as well.
/// </summary>
/// <param name="origRect">Bounds of the original image</param>
/// <param name="origLeftEye">Left eye location in the original image</param>
/// <param name="origRightEye">Right eye location in the original image</param>
/// <param name="targetRect">Bounds of the target image</param>
/// <param name="targetLeftEye">Desired left eye location in the target</param>
/// <param name="targetRightEye">Desired right eye location in the target</param>
/// <returns>The affine transform as a 2-D double array</returns>
private double[,] GetFaceAffine(Rect origRect, Point origLeftEye, Point origRightEye, Rect targetRect, Point targetLeftEye, Point targetRightEye)
{
    // Eye-position matrices: source gets an extra homogeneous column.
    INumArray<float> destEyes = ArrFactory.FloatArray(3, 2);
    INumArray<float> srcEyes = ArrFactory.FloatArray(3, 3);
    FaceSortUI.ImageUtils.EyePosAsMatrix(origRect, origLeftEye, origRightEye, ref srcEyes);
    FaceSortUI.ImageUtils.EyePosAsMatrix(targetRect, targetLeftEye, targetRightEye, ref destEyes);

    srcEyes[0, 2] = 1.0F;
    srcEyes[1, 2] = 1.0F;
    srcEyes[2, 2] = 1.0F;

    // Least-squares solve (via SVD) of srcEyes * X = destEyes, then
    // transpose into the row-major affine layout callers expect.
    SVDFloat solver = new SVDFloat(srcEyes);
    INumArray<float> solution = solver.Solve(destEyes);
    INumArray<float> transposed = (INumArray<float>)solution.Transpose();
    return ArrFactory.DoubleArray(transposed).ToArray();
}
/// <summary>
/// Projects the image data of every element in frontUIData into the
/// embedding space spanned by the column pairs of _leftMatrix/_rightMatrix.
/// </summary>
/// <param name="frontUIData">Front Data Exchange</param>
/// <returns>One projected vector (length = column count of _leftMatrix) per element</returns>
private List<INumArray<double>> RankOneProjImgList(DataExchange frontUIData)
{
    List<INumArray<double>> projections = new List<INumArray<double>>();
    int elementCount = frontUIData.ElementCount;
    int projCount = _leftMatrix.size1;

    for (int elemIdx = 0; elemIdx < elementCount; elemIdx++)
    {
        ExchangeElement element = frontUIData.GetElement(elemIdx);
        INumArray<double> imageData = GetData(element);
        INumArray<double> projected = ArrFactory.DoubleArray(projCount);

        for (int projIdx = 0; projIdx < projCount; projIdx++)
        {
            INumArray<double> left = (INumArray<double>)_leftMatrix.GetCol(projIdx);
            INumArray<double> right = (INumArray<double>)_rightMatrix.GetCol(projIdx);
            // Scalar rank-one projection: left^T * data * right (1x1 result).
            projected[projIdx] = ((INumArray<double>)left.Transpose()).Mult(imageData).Mult(right)[0];
        }

        projections.Add(projected);
    }

    return projections;
}
/// <summary>
/// Calculates the pairwise distance matrix of the elements listed in
/// frontUIData, using their rank-one projections, and stores it back on
/// frontUIData.DistanceMatrix.
/// </summary>
/// <param name="frontUIData">front-backend Data Exchange</param>
public void RankOneDistance(DataExchange frontUIData)
{
    ParseConfig(frontUIData);

    // Projection bases are loaded from the configured matrix files.
    _leftMatrix = ArrFactory.DoubleArray(m_strLeftMatFileName);
    _rightMatrix = ArrFactory.DoubleArray(m_strRightMatFileName);

    List<INumArray<double>> projections = RankOneProjImgList(frontUIData);
    int count = projections.Count;
    double[,] distances = new double[count, count];

    // The matrix is symmetric: compute the upper triangle and mirror it.
    for (int row = 0; row < count; row++)
    {
        for (int col = row; col < count; col++)
        {
            double dist = projections[row].Sub(projections[col]).Magnitude();
            distances[row, col] = dist;
            distances[col, row] = dist;
        }
    }

    frontUIData.DistanceMatrix = distances;
}
/// <summary>
/// Extract a normalized face from an input image. Normalization is done
/// by ensuring that the eye positions in the original image map to the
/// specified positions in the face image. The input image is assumed to be
/// 1 byte per channel. Steps involved:
/// 1. Construct an affine mapping from destination to original eye locations
/// 2. Fill in the destination image using the mapping
/// </summary>
/// <param name="origImage">Input image as a byte array</param>
/// <param name="origRect">Size of the original image</param>
/// <param name="origLeftEye">Left eye location in source image</param>
/// <param name="origRightEye">Right eye location in source</param>
/// <param name="bytePerPix"># bytes per pixel in original image. Since we assume 1 byte per channel this is the same as # channels; the face is constructed with the same</param>
/// <param name="faceRect">Desired face size</param>
/// <param name="faceLeftEye">Desired left eye location in face</param>
/// <param name="faceRightEye">Desired right eye location in face</param>
/// <returns>Image array representing the colour planes of the extracted face, or null if an eye lies outside the source image</returns>
public static Image[] ExtractNormalizeFace(Image[] origImage, Rect origRect, Point origLeftEye, Point origRightEye, int bytePerPix, Rect faceRect, Point faceLeftEye, Point faceRightEye)
{
    // Sanity check: both eyes must lie inside the source image.
    if (!origRect.Contains(origLeftEye) || !origRect.Contains(origRightEye))
    {
        return null;
    }

    // Step 1 - Construct the affine transformation.
    // Find the mapping between original and desired eye locations plus a
    // fake point located at right angles to the vector joining the two
    // eyes; the homogeneous third column of faceMat lets the solve recover
    // the translation component as well.
    INumArray<float> origMat = ArrFactory.FloatArray(3, 2);
    INumArray<float> faceMat = ArrFactory.FloatArray(3, 3);
    EyePosAsMatrix(origRect, origLeftEye, origRightEye, ref origMat);
    EyePosAsMatrix(faceRect, faceLeftEye, faceRightEye, ref faceMat);
    faceMat[0, 2] = 1.0F;
    faceMat[1, 2] = 1.0F;
    faceMat[2, 2] = 1.0F;

    // Least-squares solve (via SVD) for the face -> original mapping, then
    // transpose into the layout TransformImage expects.
    SVDFloat svd = new SVDFloat(faceMat);
    INumArray<float> sss = svd.Solve(origMat);
    INumArray<float> mmm = (INumArray<float>)sss.Transpose();
    double[,] affineMat = ArrFactory.DoubleArray(mmm).ToArray();

    // Step 2 - Fill in the destination image using the mapping.
    return TransformImage(origImage, origRect, faceRect, affineMat, bytePerPix);
}
/// <summary>
/// Places the coordinates to more efficiently use the screen space based on
/// their rank order in each dimension: per dimension, the values are replaced
/// by evenly spaced positions assigned in sorted order, then mapped to screen.
/// </summary>
/// <param name="arrData">input data array (elements x dimensions)</param>
/// <returns>Screen-mapped, rank-spread coordinates</returns>
private double[,] RMapData2Screen(double[,] arrData)
{
    INumArray<double> data = ArrFactory.DoubleArray(arrData);
    int rowCount = data.size0;
    int dimCount = data.size1;

    // Sort each dimension independently; sortOrder[rank, d] gives the
    // element index holding the rank-th smallest value in dimension d.
    INumArray<int> sortOrder = ArrFactory.IntArray(rowCount, dimCount);
    INumArray<double> sorted = data.SortIndex(1, out sortOrder);

    double[] step = new double[dimCount];
    double[] cursor = new double[dimCount];
    double[,] spread = new double[rowCount, dimCount];

    for (int d = 0; d < dimCount; d++)
    {
        // NOTE(review): if _maxCoord holds integers, the division happens
        // before the cast (integer step) — presumably intentional; confirm.
        step[d] = (double)((_maxCoord[d] - 10) / rowCount);
        cursor[d] = 5.0; // 5-unit margin on each side of the usable range
    }

    // Walk the ranks, handing out evenly spaced coordinates per dimension.
    for (int rank = 0; rank < rowCount; rank++)
    {
        for (int d = 0; d < dimCount; d++)
        {
            spread[sortOrder[rank, d], d] = cursor[d];
            cursor[d] += step[d];
        }
    }

    return MapData2Screen(spread);
}
/// <summary>
/// Reads the image data from an ExchangeElement, subject to the configured
/// transform: bilinear resize (m_bResize), 2x downsampling (m_bDownSample),
/// or raw pass-through of the double matrix; optionally followed by a
/// glocal transform (m_bGlocalTrans).
/// </summary>
/// <param name="exEl">element in DataExchange</param>
/// <returns>The element's image data as a 2-D double array</returns>
private INumArray<double> GetData(ExchangeElement exEl)
{
    INumArray<double> data;
    int nHeight = exEl.Height;
    int nWidth = exEl.Width;

    if (m_bResize)
    {
        // Resize the data to the configured size via bilinear resampling.
        // The Dpu image constructor takes chars, so widen each byte first.
        char[] charArr = new char[exEl.ByteData.GetLength(0)];
        for (int i = 0; i < charArr.Length; i++)
        {
            charArr[i] = Convert.ToChar(exEl.ByteData[i]);
        }

        Dpu.ImageProcessing.Image dpuImgData = new Dpu.ImageProcessing.Image(charArr, nWidth, nHeight);
        Dpu.ImageProcessing.Image rstImgData = new Dpu.ImageProcessing.Image(m_iReSizeWidth, m_iReSizeHeight);
        Dpu.ImageProcessing.Image.BilinearResample(dpuImgData, rstImgData);

        data = ArrFactory.DoubleArray(m_iReSizeHeight, m_iReSizeWidth);
        float[] pixelData = rstImgData.Pixels;
        int ipx = 0;
        for (int i = 0; i < m_iReSizeHeight; i++)
        {
            for (int j = 0; j < m_iReSizeWidth; j++)
            {
                data[i, j] = Convert.ToDouble(pixelData[ipx]);
                ipx += 1;
            }
        }
    }
    else if (m_bDownSample)
    {
        // Downsample by 2 in each direction by sampling every other pixel
        // of every other row. BUG FIX: the previous code iterated the full
        // nHeight x nWidth range into the half-sized destination and
        // stepped its source index past the end of the byte buffer, which
        // threw IndexOutOfRangeException for any non-trivial image.
        int halfHeight = nHeight / 2;
        int halfWidth = nWidth / 2;
        data = ArrFactory.DoubleArray(halfHeight, halfWidth);
        Byte[] imData = exEl.ByteData;
        for (int i = 0; i < halfHeight; i++)
        {
            int rowStart = 2 * i * nWidth; // start of source row 2*i
            for (int j = 0; j < halfWidth; j++)
            {
                data[i, j] = Convert.ToDouble(imData[rowStart + 2 * j]);
            }
        }
    }
    else
    {
        // No geometric transform: use the element's double matrix directly.
        data = ArrFactory.DoubleArray(exEl.DoubleDataMatrix);
    }

    if (m_bGlocalTrans)
    {
        data = DataTransform.GlocalTransform(data, _localWidth, _localHeight);
    }

    return data;
}