From 634682738e085d1a3c7d4ab2efdf9716971e1327 Mon Sep 17 00:00:00 2001
From: akrishnan86
Date: Wed, 30 Sep 2020 23:25:20 -0700
Subject: [PATCH] renaming variables

---
 .../examples/TranslationAveragingExample.py | 116 +++++++++---------
 1 file changed, 60 insertions(+), 56 deletions(-)

diff --git a/python/gtsam/examples/TranslationAveragingExample.py b/python/gtsam/examples/TranslationAveragingExample.py
index 0f1314645..a374dc630 100644
--- a/python/gtsam/examples/TranslationAveragingExample.py
+++ b/python/gtsam/examples/TranslationAveragingExample.py
@@ -22,99 +22,103 @@ import numpy as np
 import gtsam
 from gtsam.examples import SFMdata
 
+# Hyperparameters for 1dsfm, values taken from Kyle Wilson's code.
+MAX_1DSFM_PROJECTION_DIRECTIONS = 48
+OUTLIER_WEIGHT_THRESHOLD = 0.1
+
 
 def get_data() -> Tuple[gtsam.Values, List[gtsam.BinaryMeasurementUnit3]]:
-    """"Returns data from SfMData.createPoses(). This contains global rotations and unit translations directions."""
+    """Returns global rotations and unit translation directions between 8 cameras
+    that lie on a circle and face the center. The poses of the 8 cameras are obtained
+    from SFMdata, and the unit translation directions between some camera pairs are
+    computed from their global translations.
+    """
     # Using toy dataset in SfMdata for example.
-    poses = SFMdata.createPoses(gtsam.Cal3_S2(50.0, 50.0, 0.0, 50.0, 50.0))
-    # Rotations of the cameras in the world frame - wRc.
-    rotations = gtsam.Values()
-    # Normalized translation directions for pairs of cameras - from first camera to second,
-    # in the coordinate frame of the first camera.
-    translation_directions = []
-    for i in range(0, len(poses) - 2):
+    wTc = SFMdata.createPoses(gtsam.Cal3_S2(50.0, 50.0, 0.0, 50.0, 50.0))
+    # Rotations of the cameras in the world frame.
+    wRc_values = gtsam.Values()
+    # Normalized translation directions from camera i to camera j
+    # in the coordinate frame of camera i.
+    i_iZj_list = []
+    for i in range(0, len(wTc) - 2):
         # Add the rotation.
-        rotations.insert(i, poses[i].rotation())
+        wRc_values.insert(i, wTc[i].rotation())
         # Create unit translation measurements with next two poses.
         for j in range(i + 1, i + 3):
-            i_Z_j = gtsam.Unit3(poses[i].rotation().unrotate(
-                poses[j].translation() - poses[i].translation()))
-            translation_directions.append(gtsam.BinaryMeasurementUnit3(
-                i, j, i_Z_j, gtsam.noiseModel.Isotropic.Sigma(3, 0.01)))
+            i_iZj = gtsam.Unit3(wTc[i].rotation().unrotate(
+                wTc[j].translation() - wTc[i].translation()))
+            i_iZj_list.append(gtsam.BinaryMeasurementUnit3(
+                i, j, i_iZj, gtsam.noiseModel.Isotropic.Sigma(3, 0.01)))
     # Add the last two rotations.
-    rotations.insert(len(poses) - 1, poses[-1].rotation())
-    rotations.insert(len(poses) - 2, poses[-2].rotation())
-    return (rotations, translation_directions)
+    wRc_values.insert(len(wTc) - 1, wTc[-1].rotation())
+    wRc_values.insert(len(wTc) - 2, wTc[-2].rotation())
+    return (wRc_values, i_iZj_list)
 
 
-def estimate_poses(relative_translations: gtsam.BinaryMeasurementsUnit3,
-                   rotations: gtsam.Values) -> gtsam.Values:
-    """Estimate poses given rotations normalized translation directions between cameras.
+def estimate_poses(i_iZj_list: gtsam.BinaryMeasurementsUnit3,
+                   wRc_values: gtsam.Values) -> gtsam.Values:
+    """Estimate poses given rotations and normalized translation directions between cameras.
 
     Args:
-        relative_translations -- List of normalized translation directions between camera pairs, each direction
-            is from the first camera to the second, in the frame of the first camera.
-        rotations -- Rotations of the cameras in the world frame.
+        i_iZj_list -- List of normalized translation direction measurements between camera pairs,
+            Z here refers to measurements. The measurements are of camera j with reference
+            to camera i (iZj), in camera i's coordinate frame (i_). iZj represents a unit
+            vector to j in i's frame and is not a transformation.
+        wRc_values -- Rotations of the cameras in the world frame.
 
     Returns:
         Values -- Estimated poses.
     """
-    # Some hyperparameters, values used from 1dsfm.
-    max_1dsfm_projection_directions = 48
-    outlier_weight_threshold = 0.1
+    # Convert the translation direction measurements to the world frame using the rotations.
+    w_iZj_list = gtsam.BinaryMeasurementsUnit3()
+    for i_iZj in i_iZj_list:
+        w_iZj = gtsam.Unit3(wRc_values.atRot3(i_iZj.key1())
+                            .rotate(i_iZj.measured().point3()))
+        w_iZj_list.append(gtsam.BinaryMeasurementUnit3(
+            i_iZj.key1(), i_iZj.key2(), w_iZj, i_iZj.noiseModel()))
 
-    # Convert the translation directions to global frame using the rotations.
-    w_relative_translations = gtsam.BinaryMeasurementsUnit3()
-    for relative_translation in relative_translations:
-        w_relative_translation = gtsam.Unit3(rotations.atRot3(relative_translation.key1())
-                                             .rotate(relative_translation.measured().point3()))
-        w_relative_translations.append(gtsam.BinaryMeasurementUnit3(relative_translation.key1(),
-                                                                    relative_translation.key2(),
-                                                                    w_relative_translation,
-                                                                    relative_translation.noiseModel()))
-
-    # Indices of measurements that are to be used as projection directions. These are randomly chosen.
-    sampled_indices = np.random.choice(len(w_relative_translations), min(
-        max_1dsfm_projection_directions, len(w_relative_translations)), replace=False)
+    # Indices of measurements that are to be used as projection directions.
+    # These are randomly chosen.
+    sampled_indices = np.random.choice(len(w_iZj_list), min(
+        MAX_1DSFM_PROJECTION_DIRECTIONS, len(w_iZj_list)), replace=False)
 
     # Sample projection directions from the measurements.
-    projection_directions = [
-        w_relative_translations[idx].measured() for idx in sampled_indices]
+    projection_directions = [w_iZj_list[idx].measured()
+                             for idx in sampled_indices]
 
     outlier_weights = []
     # Find the outlier weights for each direction using MFAS.
     for direction in projection_directions:
-        algorithm = gtsam.MFAS(w_relative_translations, direction)
+        algorithm = gtsam.MFAS(w_iZj_list, direction)
         outlier_weights.append(algorithm.computeOutlierWeights())
 
-    # Compute average of outlier weights. Each outlier weight is a map from a pair of Keys (camera IDs) to a weight,
-    # where weights are proportional to the probability of the edge being an outlier.
+    # Compute average of outlier weights. Each outlier weight is a map from a pair of Keys
+    # (camera IDs) to a weight, where weights are proportional to the probability of the edge
+    # being an outlier.
     avg_outlier_weights = defaultdict(float)
     for outlier_weight_dict in outlier_weights:
         for keypair, weight in outlier_weight_dict.items():
             avg_outlier_weights[keypair] += weight / len(outlier_weights)
 
-    # Remove w_relative_tranlsations that have weight greater than threshold, these are outliers.
+    # Remove measurements whose averaged weight exceeds the threshold; these are outliers.
-    inlier_w_relative_translations = gtsam.BinaryMeasurementsUnit3()
-    [inlier_w_relative_translations.append(Z) for Z in w_relative_translations
-     if avg_outlier_weights[(Z.key1(), Z.key2())] < outlier_weight_threshold]
+    w_iZj_inliers = gtsam.BinaryMeasurementsUnit3()
+    for w_iZj in w_iZj_list:
+        if avg_outlier_weights[(w_iZj.key1(), w_iZj.key2())] < OUTLIER_WEIGHT_THRESHOLD:
+            w_iZj_inliers.append(w_iZj)
 
     # Run the optimizer to obtain translations for normalized directions.
-    w_translations = gtsam.TranslationRecovery(
-        inlier_w_relative_translations).run()
+    wtc_values = gtsam.TranslationRecovery(w_iZj_inliers).run()
 
-    poses = gtsam.Values()
-    for key in rotations.keys():
-        poses.insert(key, gtsam.Pose3(
-            rotations.atRot3(key), w_translations.atPoint3(key)))
-    return poses
+    wTc_values = gtsam.Values()
+    for key in wRc_values.keys():
+        wTc_values.insert(key, gtsam.Pose3(
+            wRc_values.atRot3(key), wtc_values.atPoint3(key)))
+    return wTc_values
 
 
 def main():
-    rotations, translation_directions = get_data()
-    poses = estimate_poses(translation_directions, rotations)
+    wRc_values, i_iZj_list = get_data()
+    wTc_values = estimate_poses(i_iZj_list, wRc_values)
     print("**** Translation averaging output ****")
-    print(poses)
+    print(wTc_values)
     print("**************************************")
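
Note on the naming convention this patch introduces: names are prefixed with the frame a
quantity is expressed in, so wTc is the pose of camera c in the world frame w, wRc its
rotation, and i_iZj a measured unit direction from camera i to camera j written in i's
frame. Below is a minimal sketch of the convention, using only gtsam calls that already
appear in the diff plus the standard Rot3/Pose3 constructors; the two camera poses are
hypothetical values chosen for illustration:

    import gtsam

    # Two hypothetical camera poses in the world frame w (the wTc pattern).
    wTi = gtsam.Pose3(gtsam.Rot3.Yaw(0.3), gtsam.Point3(0, 0, 0))
    wTj = gtsam.Pose3(gtsam.Rot3.Yaw(-0.1), gtsam.Point3(1, 2, 0))

    # i_iZj: unit direction from camera i to camera j, expressed in i's frame,
    # built the same way get_data() builds its measurements.
    i_iZj = gtsam.Unit3(wTi.rotation().unrotate(
        wTj.translation() - wTi.translation()))

    # w_iZj: the same direction rotated into the world frame, as
    # estimate_poses() does before running MFAS.
    w_iZj = gtsam.Unit3(wTi.rotation().rotate(i_iZj.point3()))
    print(w_iZj)

Read left to right, adjacent frame letters must match: wTi.rotation() is wRi, and applying
wRi to the i-frame direction i_iZj yields the w-frame direction w_iZj, so a frame mismatch
shows up directly as mismatched adjacent letters in the names.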