/// <summary>
/// update stereo feature tracking
/// Matches the newly observed stereo features against the features from the
/// previous two frames, propagates tracking IDs through the ring buffer of
/// history steps, and updates each tracked feature's persistence, average
/// disparity and disparity uncertainty.
/// </summary>
/// <param name="features">stereo features observed in the current frame</param>
/// <param name="img_width">image width</param>
/// <param name="img_height">image height</param>
public void update(stereoFeatures features, int img_width, int img_height)
{
    // maximum number of features tracked per history step
    const int max_features = 1000;

    stereoFeatures prev_features = null;

    // lazily allocate the tracking history on the first call
    if (matched_index_list.Count == 0)
    {
        IDs = new sentienceTrackingFeature[history_steps, max_features];
        for (int h = 0; h < history_steps; h++)
        {
            matched_index_list.Add(new int[max_features]);
            for (int i = 0; i < max_features; i++)
            {
                IDs[h, i] = null;
            }
        }
    }

    // ring buffer index of the previous frame
    int prev_step = curr_step - 1;
    if (prev_step < 0)
    {
        prev_step += history_steps;
    }

    int[] matched_indexes = (int[])matched_index_list[curr_step];
    int[] prev_matched_indexes = (int[])matched_index_list[prev_step];

    if (prev_feature_list.Count > 1)
    {
        // ring buffer index of the frame before the previous one
        int prev_step2 = curr_step - 2;
        if (prev_step2 < 0)
        {
            prev_step2 += history_steps;
        }

        prev_features = (stereoFeatures)prev_feature_list[prev_step];
        stereoFeatures prev_features2 = (stereoFeatures)prev_feature_list[prev_step2];

        // predict the positions of features using the velocity values.
        // NOTE(review): this overwrites the stored previous-frame positions
        // in place with the predicted positions
        for (int i = 0; i < prev_features.no_of_features; i++)
        {
            if (IDs[prev_step, i] != null)
            {
                if ((IDs[prev_step, i].vx != 0) || (IDs[prev_step, i].vy != 0))
                {
                    prev_features.features[(i * 3)] = IDs[prev_step, i].predicted_x;
                    prev_features.features[(i * 3) + 1] = IDs[prev_step, i].predicted_y;
                }
            }
        }

        // match predicted feature positions with the currently observed ones
        features.match(prev_features, matched_indexes, img_width, img_height,
                       max_displacement_x, max_displacement_y, true);

        // fill in any gaps using the frame before the previous one
        features.match(prev_features2, prev_matched_indexes, img_width, img_height,
                       max_displacement_x, max_displacement_y, true);

        for (int i = 0; i < features.no_of_features; i++)
        {
            // if the feature has been matched with a previous one update its ID
            if (matched_indexes[i] > -1)
            {
                IDs[curr_step, i] = IDs[prev_step, matched_indexes[i]];
            }
            else
            {
                if (prev_matched_indexes[i] > -1)
                {
                    IDs[curr_step, i] = IDs[prev_step2, prev_matched_indexes[i]];
                }
                else
                {
                    IDs[curr_step, i] = null;
                }
            }
        }
    }

    // update the persistence and average disparity for observed features
    for (int i = 0; i < features.no_of_features; i++)
    {
        int x = (int)features.features[i * 3];
        int y = (int)features.features[(i * 3) + 1];
        float disp = features.features[(i * 3) + 2];

        if (IDs[curr_step, i] != null)
        {
            // absolute horizontal distance between observed and predicted position
            int dx = (int)(x - IDs[curr_step, i].predicted_x);
            if (dx < 0)
            {
                dx = -dx;
            }

            IDs[curr_step, i].updatePosition(x, y);

            float av_disparity = 0;
            if (dx < 50)
            {
                // the feature has not moved very much: accumulate a running
                // average disparity over the feature's lifetime
                IDs[curr_step, i].persistence++;
                IDs[curr_step, i].total_disparity += disp;
                av_disparity = IDs[curr_step, i].total_disparity /
                               IDs[curr_step, i].persistence;
            }
            else
            {
                // large jump: blend the new observation into the old average
                av_disparity = (IDs[curr_step, i].average_disparity * 0.9f) +
                               (disp * 0.1f);
            }

            if (av_disparity > 0)
            {
                // confidence is high when the observed disparity is close
                // to the running average
                float disp_change = (av_disparity - disp) / av_disparity;
                float disp_confidence = 1.0f / (1.0f + (disp_change * disp_change));
                int idx = matched_indexes[i];
                if (idx > -1)
                {
                    // reduce uncertainty in proportion to confidence, clamp to
                    // a floor, then inflate slightly so it can recover, capped at 1
                    features.uncertainties[i] =
                        prev_features.uncertainties[idx] -
                        (prev_features.uncertainties[idx] * disp_confidence * uncertainty_gain);
                    if (features.uncertainties[i] < 0.2f)
                    {
                        features.uncertainties[i] = 0.2f;
                    }
                    features.uncertainties[i] *= 1.02f;
                    if (features.uncertainties[i] > 1)
                    {
                        features.uncertainties[i] = 1;
                    }
                }
            }

            IDs[curr_step, i].average_disparity = av_disparity;
            features.features[(i * 3) + 2] = IDs[curr_step, i].average_disparity;
        }

        // create a new tracking feature for anything unmatched
        if (IDs[curr_step, i] == null)
        {
            IDs[curr_step, i] = new sentienceTrackingFeature(max_ID, x, y, disp);
            max_ID++;
            if (max_ID > 30000)
            {
                // wrap the ID counter
                max_ID = 1;
            }
        }
    }

    // store the current features in the ring buffer slot for THIS step.
    // BUG FIX: the original code incremented curr_step first and then stored
    // at the post-increment index, whereas the fill-phase Add appends at the
    // pre-increment index.  Once the buffer had wrapped around, the next
    // call's prev_feature_list[prev_step] read therefore returned stale
    // features from history_steps frames ago instead of the previous frame.
    if (prev_feature_list.Count < history_steps)
    {
        prev_feature_list.Add(features);
    }
    else
    {
        prev_feature_list[curr_step] = features;
    }

    // advance the ring buffer index
    curr_step++;
    if (curr_step >= history_steps)
    {
        curr_step = 0;
    }
}
/// <summary>
/// update stereo feature tracking
/// Matches the newly observed stereo features against the features from the
/// previous two frames, propagates tracking IDs through the ring buffer of
/// history steps, and updates each tracked feature's persistence, average
/// disparity and disparity uncertainty.
/// </summary>
/// <param name="features">stereo features observed in the current frame</param>
/// <param name="img_width">image width</param>
/// <param name="img_height">image height</param>
public void update(stereoFeatures features, int img_width, int img_height)
{
    // maximum number of features tracked per history step
    const int max_features = 1000;

    stereoFeatures prev_features = null;

    // lazily allocate the tracking history on the first call
    if (matched_index_list.Count == 0)
    {
        IDs = new sentienceTrackingFeature[history_steps, max_features];
        for (int h = 0; h < history_steps; h++)
        {
            matched_index_list.Add(new int[max_features]);
            for (int i = 0; i < max_features; i++)
                IDs[h, i] = null;
        }
    }

    // ring buffer index of the previous frame
    int prev_step = curr_step - 1;
    if (prev_step < 0) prev_step += history_steps;

    int[] matched_indexes = (int[])matched_index_list[curr_step];
    int[] prev_matched_indexes = (int[])matched_index_list[prev_step];

    if (prev_feature_list.Count > 1)
    {
        // ring buffer index of the frame before the previous one
        int prev_step2 = curr_step - 2;
        if (prev_step2 < 0) prev_step2 += history_steps;

        prev_features = (stereoFeatures)prev_feature_list[prev_step];
        stereoFeatures prev_features2 = (stereoFeatures)prev_feature_list[prev_step2];

        // predict the positions of features using the velocity values.
        // NOTE(review): this overwrites the stored previous-frame positions
        // in place with the predicted positions
        for (int i = 0; i < prev_features.no_of_features; i++)
        {
            if (IDs[prev_step, i] != null)
            {
                if ((IDs[prev_step, i].vx != 0) || (IDs[prev_step, i].vy != 0))
                {
                    prev_features.features[(i * 3)] = IDs[prev_step, i].predicted_x;
                    prev_features.features[(i * 3) + 1] = IDs[prev_step, i].predicted_y;
                }
            }
        }

        // match predicted feature positions with the currently observed ones
        features.match(prev_features, matched_indexes, img_width, img_height,
                       max_displacement_x, max_displacement_y, true);

        // fill in any gaps using the frame before the previous one
        features.match(prev_features2, prev_matched_indexes, img_width, img_height,
                       max_displacement_x, max_displacement_y, true);

        for (int i = 0; i < features.no_of_features; i++)
        {
            // if the feature has been matched with a previous one update its ID
            if (matched_indexes[i] > -1)
                IDs[curr_step, i] = IDs[prev_step, matched_indexes[i]];
            else
            {
                if (prev_matched_indexes[i] > -1)
                    IDs[curr_step, i] = IDs[prev_step2, prev_matched_indexes[i]];
                else
                    IDs[curr_step, i] = null;
            }
        }
    }

    // update the persistence and average disparity for observed features
    for (int i = 0; i < features.no_of_features; i++)
    {
        int x = (int)features.features[i * 3];
        int y = (int)features.features[(i * 3) + 1];
        float disp = features.features[(i * 3) + 2];

        if (IDs[curr_step, i] != null)
        {
            // absolute horizontal distance between observed and predicted position
            int dx = (int)(x - IDs[curr_step, i].predicted_x);
            if (dx < 0) dx = -dx;

            IDs[curr_step, i].updatePosition(x, y);

            float av_disparity = 0;
            if (dx < 50)
            {
                // the feature has not moved very much: accumulate a running
                // average disparity over the feature's lifetime
                IDs[curr_step, i].persistence++;
                IDs[curr_step, i].total_disparity += disp;
                av_disparity = IDs[curr_step, i].total_disparity /
                               IDs[curr_step, i].persistence;
            }
            else
            {
                // large jump: blend the new observation into the old average
                av_disparity = (IDs[curr_step, i].average_disparity * 0.9f) +
                               (disp * 0.1f);
            }

            if (av_disparity > 0)
            {
                // confidence is high when the observed disparity is close
                // to the running average
                float disp_change = (av_disparity - disp) / av_disparity;
                float disp_confidence = 1.0f / (1.0f + (disp_change * disp_change));
                int idx = matched_indexes[i];
                if (idx > -1)
                {
                    // reduce uncertainty in proportion to confidence, clamp to
                    // a floor, then inflate slightly so it can recover, capped at 1
                    features.uncertainties[i] =
                        prev_features.uncertainties[idx] -
                        (prev_features.uncertainties[idx] * disp_confidence * uncertainty_gain);
                    if (features.uncertainties[i] < 0.2f)
                        features.uncertainties[i] = 0.2f;
                    features.uncertainties[i] *= 1.02f;
                    if (features.uncertainties[i] > 1)
                        features.uncertainties[i] = 1;
                }
            }

            IDs[curr_step, i].average_disparity = av_disparity;
            features.features[(i * 3) + 2] = IDs[curr_step, i].average_disparity;
        }

        // create a new tracking feature for anything unmatched
        if (IDs[curr_step, i] == null)
        {
            IDs[curr_step, i] = new sentienceTrackingFeature(max_ID, x, y, disp);
            max_ID++;
            if (max_ID > 30000) max_ID = 1; // wrap the ID counter
        }
    }

    // store the current features in the ring buffer slot for THIS step.
    // BUG FIX: the original code incremented curr_step first and then stored
    // at the post-increment index, whereas the fill-phase Add appends at the
    // pre-increment index.  Once the buffer had wrapped around, the next
    // call's prev_feature_list[prev_step] read therefore returned stale
    // features from history_steps frames ago instead of the previous frame.
    if (prev_feature_list.Count < history_steps)
        prev_feature_list.Add(features);
    else
        prev_feature_list[curr_step] = features;

    // advance the ring buffer index
    curr_step++;
    if (curr_step >= history_steps) curr_step = 0;
}