//! Non Maximal Suppression function
int isExtremum(int octave, int interval, int c, int r)
{
    float val = getVal(octave, interval, c, r);
    int step = init_sample * COpenSURF.cvRound(COpenSURF.pow(2.0f, octave));

    // reject points with low response to the det of hessian function
    if (val < thres)
    {
        return(0);
    }

    // check for maximum
    for (int i = -1; i <= 1; i++)
    {
        for (int j = -step; j <= step; j += step)
        {
            for (int k = -step; k <= step; k += step)
            {
                if (i != 0 || j != 0 || k != 0)
                {
                    if (getVal(octave, interval + i, c + j, r + k) > val)
                    {
                        return(0);
                    }
                }
            }
        }
    }

    return(1);
}
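/*
 * The candidate maxima found by isExtremum() are refined below to sub-pixel and
 * sub-scale accuracy. interp_step() (not shown here; deriv_3D() and hessian_3D()
 * further down presumably supply its gradient and Hessian) fits a 3D quadratic to
 * the determinant-of-Hessian responses and returns the offsets (xc, xr, xi) of the
 * true extremum from the sampled grid position; a candidate is only kept when all
 * offsets are within half a grid step.
 *
 * The scale written into the Ipoint converts the SURF filter size back to a Gaussian
 * sigma: the smallest 9x9 box filter approximates sigma = 1.2, so
 *     scale = 1.2 * filterSize / 9,   filterSize = 3 * (2^(octv+1) * (intvl + xi + 1) + 1)
 * e.g. octave 0, interval 1, xi = 0 gives a 15x15 filter and scale = 1.2 * 15 / 9 = 2.0.
 */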
void interp_extremum(int octv, int intvl, int r, int c)
{
    double xi = 0, xr = 0, xc = 0;
    int step = init_sample * COpenSURF.cvRound(COpenSURF.pow(2.0f, octv));

    // Get the offsets to the actual location of the extremum
    bool bok = interp_step(octv, intvl, r, c, out xi, out xr, out xc);
    if (bok == false)
    {
        return;
    }

    // If point is sufficiently close to the actual extremum
    if (COpenSURF.fabs((float)xi) <= 0.5 && COpenSURF.fabs((float)xr) <= 0.5 && COpenSURF.fabs((float)xc) <= 0.5)
    {
        // Create Ipoint and push onto Ipoints vector
        Ipoint ipt = new Ipoint();
        ipt.x = (float)(c + step * xc);
        ipt.y = (float)(r + step * xr);
        ipt.scale = (float)((1.2f / 9.0f) * (3 * (COpenSURF.pow(2.0f, octv + 1) * (intvl + xi + 1) + 1)));
        ipt.laplacian = (int)getLaplacian(octv, intvl, c, r);
        ipts.Add(ipt);
    }
}
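/*
 * The 64-element SURF descriptor is built from a 4x4 grid of 5x5-sample sub-regions
 * centred on the interest point. For each sub-region the Gaussian-weighted (sigma =
 * 3.3*scale) Haar wavelet responses (filter size 2*scale) are accumulated into the
 * four sums
 *     (sum dx, sum dy, sum |dx|, sum |dy|)
 * giving 4 x 4 x 4 = 64 values, which are finally normalised to unit length so the
 * descriptor is invariant to contrast changes. getUprightDescriptor() is the upright
 * (U-SURF) variant, which skips orientation assignment and samples along the image axes.
 */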
void getUprightDescriptor()
{
    int y, x, count = 0;
    int scale;
    float dx, dy, mdx, mdy;
    float gauss, rx, ry, len = 0.0f;
    float[] desc;

    Ipoint ipt = ipts[index];
    scale = (int)ipt.scale;
    y = COpenSURF.cvRound(ipt.y);
    x = COpenSURF.cvRound(ipt.x);
    desc = ipt.descriptor;

    // Calculate descriptor for this interest point
    for (int i = -10; i < 10; i += 5)
    {
        for (int j = -10; j < 10; j += 5)
        {
            dx = dy = mdx = mdy = 0;
            for (int k = i; k < i + 5; ++k)
            {
                for (int l = j; l < j + 5; ++l)
                {
                    // get Gaussian weighted x and y responses
                    gauss = COpenSURF.gaussian(k * scale, l * scale, 3.3f * scale);
                    rx = gauss * haarX(k * scale + y, l * scale + x, 2 * scale);
                    ry = gauss * haarY(k * scale + y, l * scale + x, 2 * scale);

                    dx += rx;
                    dy += ry;
                    mdx += COpenSURF.fabs(rx);
                    mdy += COpenSURF.fabs(ry);
                }
            }

            // add the values to the descriptor vector
            desc[count++] = dx;
            desc[count++] = dy;
            desc[count++] = mdx;
            desc[count++] = mdy;

            // store the current length^2 of the vector
            len += dx * dx + dy * dy + mdx * mdx + mdy * mdy;
        }
    }

    // convert to unit vector
    len = (float)Math.Sqrt(len);
    for (int i = 0; i < 64; i++)
    {
        desc[i] /= len;
    }
}
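/*
 * hessian_3D() and deriv_3D() build the 3x3 Hessian and the gradient of the
 * determinant-of-Hessian response in (x, y, scale), using central differences on
 * the sampling grid:
 *     dx  ~ (f(x+1) - f(x-1)) / 2
 *     dxx ~ f(x+1) + f(x-1) - 2 f(x)
 *     dxy ~ (f(x+1,y+1) - f(x-1,y+1) - f(x+1,y-1) + f(x-1,y-1)) / 4
 * and likewise for the remaining mixed terms. The spacing between samples is one
 * grid step, so the resulting offsets are in step units, which is why
 * interp_extremum() multiplies them back by step.
 */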
CDVMatrix hessian_3D(int octv, int intvl, int r, int c)
{
    CDVMatrix vret = new CDVMatrix();
    double v, dxx, dyy, dss, dxy, dxs, dys;
    int step = init_sample * COpenSURF.cvRound(COpenSURF.pow(2.0f, octv));

    v = getValLowe(octv, intvl, r, c);
    dxx = (getValLowe(octv, intvl, r, c + step) + getValLowe(octv, intvl, r, c - step) - 2 * v);
    dyy = (getValLowe(octv, intvl, r + step, c) + getValLowe(octv, intvl, r - step, c) - 2 * v);
    dss = (getValLowe(octv, intvl + 1, r, c) + getValLowe(octv, intvl - 1, r, c) - 2 * v);
    dxy = (getValLowe(octv, intvl, r + step, c + step) - getValLowe(octv, intvl, r + step, c - step) -
           getValLowe(octv, intvl, r - step, c + step) + getValLowe(octv, intvl, r - step, c - step)) / 4.0;
    dxs = (getValLowe(octv, intvl + 1, r, c + step) - getValLowe(octv, intvl + 1, r, c - step) -
           getValLowe(octv, intvl - 1, r, c + step) + getValLowe(octv, intvl - 1, r, c - step)) / 4.0;
    dys = (getValLowe(octv, intvl + 1, r + step, c) - getValLowe(octv, intvl + 1, r - step, c) -
           getValLowe(octv, intvl - 1, r + step, c) + getValLowe(octv, intvl - 1, r - step, c)) / 4.0;

    /***
     * cvmSet( H, 0, 0, dxx );   cvmSet( H, 0, 1, dxy );   cvmSet( H, 0, 2, dxs );
     * cvmSet( H, 1, 0, dxy );   cvmSet( H, 1, 1, dyy );   cvmSet( H, 1, 2, dys );
     * cvmSet( H, 2, 0, dxs );   cvmSet( H, 2, 1, dys );   cvmSet( H, 2, 2, dss );
     ***/
    vret.M11 = dxx; vret.M12 = dxy; vret.M13 = dxs;
    vret.M21 = dxy; vret.M22 = dyy; vret.M23 = dys;
    vret.M31 = dxs; vret.M32 = dys; vret.M33 = dss;

    return(vret);
}
void deriv_3D(int octv, int intvl, int r, int c, out double dx, out double dy, out double ds)
{
    dx = dy = ds = 0;
    int step = init_sample * COpenSURF.cvRound(COpenSURF.pow(2.0f, octv));

    dx = (getValLowe(octv, intvl, r, c + step) - getValLowe(octv, intvl, r, c - step)) / 2.0;
    dy = (getValLowe(octv, intvl, r + step, c) - getValLowe(octv, intvl, r - step, c)) / 2.0;
    ds = (getValLowe(octv, intvl + 1, r, c) - getValLowe(octv, intvl - 1, r, c)) / 2.0;
}
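/*
 * getIpoint() / stepInterp() implement the quadratic interpolation of Brown & Lowe:
 * the response around a sample point is modelled as
 *     D(x) ~ D + (dD/dx)^T x + 0.5 x^T (d2D/dx2) x
 * and setting its derivative to zero gives the offset of the true extremum,
 *     x_hat = -(d2D/dx2)^(-1) * (dD/dx).
 * stepInterp() solves this 3x3 system explicitly (Cramer's rule via the cofactor
 * expansion of the determinant) rather than calling a matrix library, and getIpoint()
 * iterates the step, re-centring on the nearest grid point, until all offsets fall
 * below 0.5 or the iteration budget (interp_steps) is exhausted.
 */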
//! Interpolate feature to sub pixel accuracy
void getIpoint(int o, int i, int c, int r)
{
    bool converged = false;
    float[] x = new float[3];

    for (int steps = 0; steps <= interp_steps; ++steps)
    {
        // perform a step of the interpolation
        stepInterp(o, i, c, r, x);

        // check stopping conditions
        if (COpenSURF.fabs(x[0]) < 0.5 && COpenSURF.fabs(x[1]) < 0.5 && COpenSURF.fabs(x[2]) < 0.5)
        {
            converged = true;
            break;
        }

        // find coords of different sample point
        c += COpenSURF.cvRound(x[0]);
        r += COpenSURF.cvRound(x[1]);
        i += COpenSURF.cvRound(x[2]);

        // check all params are within bounds
        if (i < 1 || i >= intervals - 1 || c < 1 || r < 1 || c > i_width - 1 || r > i_height - 1)
        {
            return;
        }
    }

    // if interpolation has not converged on a result
    if (!converged)
    {
        return;
    }

    // create Ipoint and push onto Ipoints vector
    Ipoint ipt = new Ipoint();
    ipt.x = (float)(c + x[0]);
    ipt.y = (float)(r + x[1]);
    ipt.scale = (1.2f / 9.0f) * (3 * (COpenSURF.pow(2.0f, o + 1) * (i + x[2] + 1) + 1));
    ipt.laplacian = (int)getLaplacian(o, i, c, r);
    if (ipts == null)
    {
        ipts = new List<Ipoint>();
    }
    ipts.Add(ipt);
}
//! Perform a step of interpolation (fitting 3D quadratic)
void stepInterp(int o, int i, int c, int r, float[] x)
{
    float v, dx, dy, ds, dxx, dyy, dss, dxy, dxs, dys, det;
    int step = init_sample * COpenSURF.cvRound(COpenSURF.pow(2.0f, o));

    // value of current pixel
    v = getVal(o, i, c, r);

    // first order derivs in 3D
    dx = (getVal(o, i, c + step, r) - getVal(o, i, c - step, r)) / 2.0f;
    dy = (getVal(o, i, c, r + step) - getVal(o, i, c, r - step)) / 2.0f;
    ds = (getVal(o, i + 1, c, r) - getVal(o, i - 1, c, r)) / 2.0f;

    // second order derivs in 3D
    dxx = (getVal(o, i, c + step, r) + getVal(o, i, c - step, r) - 2 * v);
    dyy = (getVal(o, i, c, r + step) + getVal(o, i, c, r - step) - 2 * v);
    dss = (getVal(o, i + 1, c, r) + getVal(o, i - 1, c, r) - 2 * v);
    dxy = (getVal(o, i, c + step, r + step) - getVal(o, i, c - step, r + step) -
           getVal(o, i, c + step, r - step) + getVal(o, i, c - step, r - step)) / 4.0f;
    dxs = (getVal(o, i + 1, c + step, r) - getVal(o, i + 1, c - step, r) -
           getVal(o, i - 1, c + step, r) + getVal(o, i - 1, c - step, r)) / 4.0f;
    dys = (getVal(o, i + 1, c, r + step) - getVal(o, i + 1, c, r - step) -
           getVal(o, i - 1, c, r + step) + getVal(o, i - 1, c, r - step)) / 4.0f;

    // calculate determinant of:
    // | dxx dxy dxs |
    // | dxy dyy dys |
    // | dxs dys dss |
    det = dxx * (dyy * dss - dys * dys) - dxy * (dxy * dss - dxs * dys) + dxs * (dxy * dys - dxs * dyy);

    // calculate resulting vector after matrix mult:
    // | dxx dxy dxs |-1   | dx |
    // | dxy dyy dys |   X | dy |
    // | dxs dys dss |     | ds |
    x[0] = -1.0f / det * (dx * (dyy * dss - dys * dys) + dy * (dxs * dys - dss * dxy) + ds * (dxy * dys - dyy * dxs));
    x[1] = -1.0f / det * (dx * (dys * dxs - dss * dxy) + dy * (dxx * dss - dxs * dxs) + ds * (dxs * dxy - dys * dxx));
    x[2] = -1.0f / det * (dx * (dxy * dys - dxs * dyy) + dy * (dxy * dxs - dxx * dys) + ds * (dxx * dyy - dxy * dxy));
}
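/*
 * getIpoints() drives the detector: after buildDet() has filled the response map,
 * each octave is scanned with a sampling step of init_sample * 2^octave, and the
 * border is set to half the size of the largest filter used in that octave so no
 * filter ever reads outside the image. Only the inner intervals (1 .. intervals-2)
 * are tested, because isExtremum() needs a full 3x3x3 neighbourhood in scale.
 */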
//! Find the image features and write into vector of features
public void getIpoints()
{
    int extremum_count = 0;

    // Clear the vector of existing ipts
    ipts.Clear();

    // Calculate approximated determinant of hessian values
    // = Compute value for each position in the scale-space image.
    buildDet();

    for (int o = 0; o < octaves; o++)
    {
        // for each octave double the sampling step of the previous
        int step = init_sample * COpenSURF.cvRound(COpenSURF.pow(2.0f, o));

        // determine border width for the largest filter for each octave
        int border = (3 * COpenSURF.cvRound(COpenSURF.pow(2.0f, o + 1) * (intervals) + 1) + 1) / 2;

        // check for maxima across the scale space
        for (int i = 1; i < intervals - 1; ++i)
        {
            for (int r = border; r < i_height - border; r += step)
            {
                for (int c = border; c < i_width - border; c += step)
                {
                    if (isExtremum(o, i, c, r) != 0)
                    {
                        extremum_count += 1;
                        interp_extremum(o, i, r, c);
                    }
                }
            }
        }
    }
}
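/*
 * Orientation assignment: Haar wavelet responses (filter size 4*scale) are sampled
 * within a radius of 6*scale around the interest point and weighted by a Gaussian
 * with sigma = 2.5*scale. A pi/3 window is then slid around the point in 0.2 rad
 * increments; the responses falling inside each window are summed, and the
 * orientation is the angle of the longest summed vector,
 *     theta = getAngle(sum resX, sum resY).
 * This port also keeps a second orientation when its (squared) window response is
 * at least 0.8 of the maximum, pushing an extra Ipoint onto the list.
 */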
void getOrientation()
{
    Ipoint ipt = ipts[index];
    float gauss = 0;
    float scale = ipt.scale;
    int s = COpenSURF.cvRound(scale);
    int r = COpenSURF.cvRound(ipt.y);
    int c = COpenSURF.cvRound(ipt.x);
    List<float> resX = new List<float>();
    List<float> resY = new List<float>();
    List<float> Ang = new List<float>();

    // calculate haar responses for points within radius of 6*scale
    for (int i = -6 * s; i <= 6 * s; i += s)
    {
        for (int j = -6 * s; j <= 6 * s; j += s)
        {
            if (i * i + j * j < 36 * s * s)
            {
                gauss = COpenSURF.gaussian(i, j, 2.5f * s);
                float _resx = gauss * haarX(r + j, c + i, 4 * s);
                float _resy = gauss * haarY(r + j, c + i, 4 * s);
                resX.Add(_resx);
                resY.Add(_resy);
                Ang.Add(COpenSURF.getAngle(_resx, _resy));
            }
        }
    }

    // calculate the dominant direction
    float sumX, sumY;
    float max = 0, old_max = 0, orientation = 0, old_orientation = 0;
    float ang1, ang2, ang;

    // loop slides pi/3 window around feature point
    for (ang1 = 0; ang1 < 2 * pi; ang1 += 0.2f)
    {
        ang2 = (ang1 + pi / 3.0f > 2 * pi ? ang1 - 5.0f * pi / 3.0f : ang1 + pi / 3.0f);
        sumX = sumY = 0;

        for (int k = 0; k < Ang.Count; k++)
        {
            // get angle from the x-axis of the sample point
            ang = Ang[k];

            // determine whether the point is within the window
            if (ang1 < ang2 && ang1 < ang && ang < ang2)
            {
                sumX += resX[k];
                sumY += resY[k];
            }
            else if (ang2 < ang1 && ((ang > 0 && ang < ang2) || (ang > ang1 && ang < 2 * pi)))
            {
                sumX += resX[k];
                sumY += resY[k];
            }
        }

        // if the vector produced from this window is longer than all
        // previous vectors then this forms the new dominant direction
        if (sumX * sumX + sumY * sumY > max)
        {
            // store second largest orientation
            old_max = max;
            old_orientation = orientation;

            // store largest orientation
            max = sumX * sumX + sumY * sumY;
            orientation = COpenSURF.getAngle(sumX, sumY);
        }
    } // for(ang1 = 0; ang1 < 2*pi; ang1+=0.2f)

    // check whether there are two dominant orientations based on 0.8 threshold
    if (old_max >= 0.8 * max)
    {
        // assign second largest orientation and push copy onto vector
        ipt.orientation = old_orientation;
        ipts.Add(ipt);

        // Reset ipt to point to correct Ipoint in the vector
        ipt = ipts[index];
    }

    // assign orientation of the dominant response vector
    ipt.orientation = orientation;
}
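/*
 * getDescriptor() is the rotation-invariant counterpart of getUprightDescriptor():
 * the sample grid is rotated into the feature's dominant orientation,
 *     sample_x = x + scale * (-l*sin(theta) + k*cos(theta))
 *     sample_y = y + scale * ( l*cos(theta) + k*sin(theta))
 * and the Haar responses are rotated back into the feature frame,
 *     rrx = -rx*sin(theta) + ry*cos(theta),   rry = rx*cos(theta) + ry*sin(theta),
 * before being accumulated into the same (dx, dy, |dx|, |dy|) sums as above.
 */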
void getDescriptor()
{
    int y, x, count = 0;
    float dx, dy, mdx, mdy, co, si;
    float[] desc;
    int scale;
    int sample_x;
    int sample_y;
    float gauss, rx, ry, rrx, rry, len = 0;

    Ipoint ipt = ipts[index];
    scale = (int)ipt.scale;
    x = COpenSURF.cvRound(ipt.x);
    y = COpenSURF.cvRound(ipt.y);
    co = (float)Math.Cos(ipt.orientation);
    si = (float)Math.Sin(ipt.orientation);
    desc = ipt.descriptor;

    // Calculate descriptor for this interest point
    for (int i = -10; i < 10; i += 5)
    {
        for (int j = -10; j < 10; j += 5)
        {
            dx = dy = mdx = mdy = 0;
            for (int k = i; k < i + 5; ++k)
            {
                for (int l = j; l < j + 5; ++l)
                {
                    // Get coords of sample point on the rotated axis
                    sample_x = COpenSURF.cvRound(x + (-l * scale * si + k * scale * co));
                    sample_y = COpenSURF.cvRound(y + (l * scale * co + k * scale * si));

                    // Get the gaussian weighted x and y responses
                    gauss = COpenSURF.gaussian(k * scale, l * scale, 3.3f * scale);
                    rx = gauss * haarX(sample_y, sample_x, 2 * scale);
                    ry = gauss * haarY(sample_y, sample_x, 2 * scale);

                    // Get the gaussian weighted x and y responses on rotated axis
                    rrx = -rx * si + ry * co;
                    rry = rx * co + ry * si;

                    dx += rrx;
                    dy += rry;
                    mdx += COpenSURF.fabs(rrx);
                    mdy += COpenSURF.fabs(rry);
                }
            }

            // add the values to the descriptor vector
            desc[count++] = dx;
            desc[count++] = dy;
            desc[count++] = mdx;
            desc[count++] = mdy;

            // store the current length^2 of the vector
            len += dx * dx + dy * dy + mdx * mdx + mdy * mdy;
        } // for (int j = -10; j < 10; j+=5)
    } // for (int i = -10; i < 10; i+=5)

    // convert to unit vector
    len = (float)Math.Sqrt(len);
    for (int i = 0; i < 64; i++)
    {
        desc[i] /= len;
    }
}
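/*
 * buildDet() fills m_det with the approximated determinant of the Hessian used by
 * SURF. The second-order Gaussian derivatives are replaced by box filters evaluated
 * on the integral image (COpenSURF.Area), each response is normalised by the filter
 * area, 1 / (3*lobe)^2, and the determinant is approximated as
 *     det(H_approx) = Dxx * Dyy - (0.9 * Dxy)^2
 * where the factor 0.9 compensates for the box-filter approximation. The sign of the
 * Laplacian (Dxx + Dyy) is folded into the stored response, which getLaplacian()
 * presumably reads back when an Ipoint is created.
 */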
void buildDet()
{
    // Compute value for each position in the scale-space image.
    int lobe, border, step;
    float Dxx = 0, Dyy = 0, Dxy = 0, scale;
    int ixdet = 0;

    for (int o = 0; o < octaves; o++)
    {
        // calculate filter border for this octave
        border = (3 * COpenSURF.cvRound(COpenSURF.pow(2.0f, o + 1) * (intervals) + 1) + 1) / 2;
        step = init_sample * COpenSURF.cvRound(COpenSURF.pow(2.0f, o));

        for (int i = 0; i < intervals; i++)
        {
            // calculate lobe length (filter side length/3)
            lobe = COpenSURF.cvRound(COpenSURF.pow(2.0f, o + 1) * (i + 1) + 1);
            scale = 1.0f / COpenSURF.pow((float)(3 * lobe), 2);

            for (int r = border; r < i_height - border; r += step)
            {
                for (int c = border; c < i_width - border; c += step)
                {
                    // Dyy: vertical second-order box-filter response
                    {
                        float dyy0 = COpenSURF.Area(img, c - (lobe - 1), r - ((3 * lobe - 1) / 2), (2 * lobe - 1), lobe);
                        float dyy1 = COpenSURF.Area(img, c - (lobe - 1), r - ((lobe - 1) / 2), (2 * lobe - 1), lobe);
                        float dyy2 = COpenSURF.Area(img, c - (lobe - 1), r + ((lobe + 1) / 2), (2 * lobe - 1), lobe);
                        Dyy = dyy0 - 2 * dyy1 + dyy2;
                    }
                    // Dxx: horizontal second-order box-filter response
                    {
                        float dxx0 = COpenSURF.Area(img, c - ((3 * lobe - 1) / 2), r - (lobe - 1), lobe, (2 * lobe - 1));
                        float dxx1 = COpenSURF.Area(img, c - ((lobe - 1) / 2), r - (lobe - 1), lobe, (2 * lobe - 1));
                        float dxx2 = COpenSURF.Area(img, c + ((lobe + 1) / 2), r - (lobe - 1), lobe, (2 * lobe - 1));
                        Dxx = dxx0 - 2 * dxx1 + dxx2;
                    }
                    // Dxy: mixed second-order box-filter response
                    {
                        float dxy0 = COpenSURF.Area(img, c - lobe, r - lobe, lobe, lobe);
                        float dxy1 = COpenSURF.Area(img, c + 1, r + 1, lobe, lobe);
                        float dxy2 = COpenSURF.Area(img, c - lobe, r + 1, lobe, lobe);
                        float dxy3 = COpenSURF.Area(img, c + 1, r - lobe, lobe, lobe);
                        Dxy = dxy0 + dxy1 - dxy2 - dxy3;
                    }

                    // Normalise the filter responses with respect to their size
                    Dxx *= scale;
                    Dyy *= scale;
                    Dxy *= scale;

                    // Get the sign of the laplacian
                    int lap_sign = (Dxx + Dyy >= 0 ? 1 : -1);

                    // Get the determinant of hessian response
                    float res = (Dxx * Dyy - 0.9f * 0.9f * Dxy * Dxy);
                    res = (res < thres ? 0 : lap_sign * res);

                    // calculate approximated determinant of hessian value
                    m_det[(o * intervals + i) * (i_width * i_height) + (r * i_width + c)] = res;
                    ixdet += 1;
                }
            }
        }
    }
}
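/*
 * Putting it together: a typical detection pass calls getIpoints(), which runs
 * buildDet() once and then isExtremum()/interp_extremum() over the scale space;
 * afterwards getOrientation() and getDescriptor() (or getUprightDescriptor() for
 * U-SURF) are run for each detected Ipoint to produce the final 64-element
 * descriptors. This ordering is implied by the functions above rather than shown
 * explicitly here.
 */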