commit 8051e743b7
@@ -66,27 +66,6 @@ class KeySet {
  void serialize() const;
};

// Actually a vector<Key>
class KeyVector {
  KeyVector();
  KeyVector(const gtsam::KeyVector& other);

  // Note: no print function

  // common STL methods
  size_t size() const;
  bool empty() const;
  void clear();

  // structure specific methods
  size_t at(size_t i) const;
  size_t front() const;
  size_t back() const;
  void push_back(size_t key) const;

  void serialize() const;
};

// Actually a FastMap<Key,int>
class KeyGroupMap {
  KeyGroupMap();

@@ -19,8 +19,6 @@
#include <gtsam/nonlinear/NonlinearFactor.h>

using namespace gtsam;

namespace gtsam {

using JacobianVector = std::vector<Matrix>;

@@ -72,8 +72,14 @@ class GTSAM_EXPORT GncParams {
  double relativeCostTol = 1e-5; ///< If relative cost change is below this threshold, stop iterating
  double weightsTol = 1e-4; ///< If the weights are within weightsTol from being binary, stop iterating (only for TLS)
  Verbosity verbosity = SILENT; ///< Verbosity level
  std::vector<size_t> knownInliers = std::vector<size_t>(); ///< Slots in the factor graph corresponding to measurements that we know are inliers
  std::vector<size_t> knownOutliers = std::vector<size_t>(); ///< Slots in the factor graph corresponding to measurements that we know are outliers

  //TODO(Varun) replace IndexVector with vector<size_t> once pybind11/stl.h is globally enabled.
  /// Use IndexVector for inliers and outliers since it is fast + wrapping
  using IndexVector = FastVector<uint64_t>;
  ///< Slots in the factor graph corresponding to measurements that we know are inliers
  IndexVector knownInliers = IndexVector();
  ///< Slots in the factor graph corresponding to measurements that we know are outliers
  IndexVector knownOutliers = IndexVector();

  /// Set the robust loss function to be used in GNC (chosen among the ones in GncLossType).
  void setLossType(const GncLossType type) {

@@ -114,7 +120,7 @@ class GTSAM_EXPORT GncParams {
   * This functionality is commonly used in SLAM when one may assume the odometry is outlier free, and
   * only apply GNC to prune outliers from the loop closures.
   * */
  void setKnownInliers(const std::vector<size_t>& knownIn) {
  void setKnownInliers(const IndexVector& knownIn) {
    for (size_t i = 0; i < knownIn.size(); i++){
      knownInliers.push_back(knownIn[i]);
    }

@@ -125,7 +131,7 @@ class GTSAM_EXPORT GncParams {
   * corresponds to the slots in the factor graph. For instance, if you have a nonlinear factor graph nfg,
   * and you provide knownOut = {0, 2, 15}, GNC will not apply outlier rejection to nfg[0], nfg[2], and nfg[15].
   * */
  void setKnownOutliers(const std::vector<size_t>& knownOut) {
  void setKnownOutliers(const IndexVector& knownOut) {
    for (size_t i = 0; i < knownOut.size(); i++){
      knownOutliers.push_back(knownOut[i]);
    }

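From Python, the two setters above take the factor-graph slot indices of measurements that GNC should always keep or always reject. A minimal sketch of that usage, assuming the gtsam Python wrapper built from this change; the toy graph, key, and slot indices are hypothetical:

```python
import gtsam

# Hypothetical toy problem: a single 2D point constrained by one prior factor (slot 0).
noise = gtsam.noiseModel.Unit.Create(2)
graph = gtsam.NonlinearFactorGraph()
graph.add(gtsam.PriorFactorPoint2(1, gtsam.Point2(0.0, 0.0), noise))

initial = gtsam.Values()
initial.insert(1, gtsam.Point2(1.0, 2.0))

params = gtsam.GncLMParams()
params.setKnownInliers([0])       # slot 0 is trusted and is never down-weighted
# params.setKnownOutliers([...])  # slots that should always be rejected

result = gtsam.GncLMOptimizer(graph, initial, params).optimize()
print(result.atPoint2(1))
```

In a SLAM setting the known-inlier slots would typically be the odometry factors, leaving GNC to classify only the loop closures.
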
@@ -163,7 +169,7 @@ class GTSAM_EXPORT GncParams {
      std::cout << "knownInliers: " << knownInliers[i] << "\n";
    for (size_t i = 0; i < knownOutliers.size(); i++)
      std::cout << "knownOutliers: " << knownOutliers[i] << "\n";
    baseOptimizerParams.print(str);
    baseOptimizerParams.print("Base optimizer params: ");
  }
};

@@ -167,8 +167,9 @@ boost::shared_ptr<GaussianFactor> NoiseModelFactor::linearize(
    return GaussianFactor::shared_ptr(
        new JacobianFactor(terms, b,
            boost::static_pointer_cast<Constrained>(noiseModel_)->unit()));
  else
  else {
    return GaussianFactor::shared_ptr(new JacobianFactor(terms, b));
  }
}

/* ************************************************************************* */

@@ -0,0 +1,38 @@
//*************************************************************************
// Custom Factor wrapping
//*************************************************************************

namespace gtsam {

#include <gtsam/nonlinear/CustomFactor.h>
virtual class CustomFactor : gtsam::NoiseModelFactor {
  /*
   * Note CustomFactor will not be wrapped for MATLAB, as there is no supporting
   * machinery there. This is achieved by adding `gtsam::CustomFactor` to the
   * ignore list in `matlab/CMakeLists.txt`.
   */
  CustomFactor();
  /*
   * Example:
   * ```
   * def error_func(this: CustomFactor, v: gtsam.Values, H: List[np.ndarray]):
   *   <calculated error>
   *   if not H is None:
   *     <calculate the Jacobian>
   *     H[0] = J1 # 2-d numpy array for a Jacobian block
   *     H[1] = J2
   *     ...
   *   return error # 1-d numpy array
   *
   * cf = CustomFactor(noise_model, keys, error_func)
   * ```
   */
  CustomFactor(const gtsam::SharedNoiseModel& noiseModel,
               const gtsam::KeyVector& keys,
               const gtsam::CustomErrorFunction& errorFunction);

  void print(string s = "",
             gtsam::KeyFormatter keyFormatter = gtsam::DefaultKeyFormatter);
};

}

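For reference, a self-contained Python sketch in the spirit of the docstring above; the measurement (1, 2), noise model, and key 0 are hypothetical, and the module layout is assumed to match the wrapper built from this change:

```python
from typing import List, Optional

import numpy as np
import gtsam


def error_func(this: gtsam.CustomFactor, v: gtsam.Values, H: Optional[List[np.ndarray]]):
    """Residual of a hypothetical 'prior on a Point2' factor with measurement (1, 2)."""
    key = this.keys()[0]
    estimate = v.atPoint2(key)
    error = estimate - np.array([1.0, 2.0])
    if H is not None:
        H[0] = np.eye(2)  # Jacobian of the residual w.r.t. the Point2
    return error


noise = gtsam.noiseModel.Isotropic.Sigma(2, 0.1)
factor = gtsam.CustomFactor(noise, gtsam.KeyVector([0]), error_func)

values = gtsam.Values()
values.insert(0, gtsam.Point2(0.0, 0.0))
print(factor.error(values))  # nonzero, since the current estimate is not at (1, 2)
```
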
@@ -99,11 +99,11 @@ class NonlinearFactorGraph {
  string dot(
      const gtsam::Values& values,
      const gtsam::KeyFormatter& keyFormatter = gtsam::DefaultKeyFormatter,
      const GraphvizFormatting& writer = GraphvizFormatting());
      const gtsam::GraphvizFormatting& writer = gtsam::GraphvizFormatting());
  void saveGraph(
      const string& s, const gtsam::Values& values,
      const gtsam::KeyFormatter& keyFormatter = gtsam::DefaultKeyFormatter,
      const GraphvizFormatting& writer = GraphvizFormatting()) const;
      const gtsam::GraphvizFormatting& writer = gtsam::GraphvizFormatting()) const;

  // enabling serialization functionality
  void serialize() const;

@@ -135,37 +135,6 @@ virtual class NoiseModelFactor : gtsam::NonlinearFactor {
  Vector whitenedError(const gtsam::Values& x) const;
};

#include <gtsam/nonlinear/CustomFactor.h>
virtual class CustomFactor : gtsam::NoiseModelFactor {
  /*
   * Note CustomFactor will not be wrapped for MATLAB, as there is no supporting
   * machinery there. This is achieved by adding `gtsam::CustomFactor` to the
   * ignore list in `matlab/CMakeLists.txt`.
   */
  CustomFactor();
  /*
   * Example:
   * ```
   * def error_func(this: CustomFactor, v: gtsam.Values, H: List[np.ndarray]):
   *   <calculated error>
   *   if not H is None:
   *     <calculate the Jacobian>
   *     H[0] = J1 # 2-d numpy array for a Jacobian block
   *     H[1] = J2
   *     ...
   *   return error # 1-d numpy array
   *
   * cf = CustomFactor(noise_model, keys, error_func)
   * ```
   */
  CustomFactor(const gtsam::SharedNoiseModel& noiseModel,
               const gtsam::KeyVector& keys,
               const gtsam::CustomErrorFunction& errorFunction);

  void print(string s = "",
             gtsam::KeyFormatter keyFormatter = gtsam::DefaultKeyFormatter);
};

#include <gtsam/nonlinear/Values.h>
class Values {
  Values();

@@ -544,12 +513,34 @@ virtual class DoglegParams : gtsam::NonlinearOptimizerParams {
};

#include <gtsam/nonlinear/GncParams.h>
enum GncLossType {
  GM /*Geman McClure*/,
  TLS /*Truncated least squares*/
};

template<PARAMS>
virtual class GncParams {
  GncParams(const PARAMS& baseOptimizerParams);
  GncParams();
  void setVerbosityGNC(const This::Verbosity value);
  void print(const string& str) const;
  BaseOptimizerParameters baseOptimizerParams;
  gtsam::GncLossType lossType;
  size_t maxIterations;
  double muStep;
  double relativeCostTol;
  double weightsTol;
  Verbosity verbosity;
  gtsam::KeyVector knownInliers;
  gtsam::KeyVector knownOutliers;

  void setLossType(const gtsam::GncLossType type);
  void setMaxIterations(const size_t maxIter);
  void setMuStep(const double step);
  void setRelativeCostTol(double value);
  void setWeightsTol(double value);
  void setVerbosityGNC(const gtsam::This::Verbosity value);
  void setKnownInliers(const gtsam::KeyVector& knownIn);
  void setKnownOutliers(const gtsam::KeyVector& knownOut);
  void print(const string& str = "GncParams: ") const;

  enum Verbosity {
    SILENT,

@@ -597,6 +588,11 @@ virtual class GncOptimizer {
  GncOptimizer(const gtsam::NonlinearFactorGraph& graph,
               const gtsam::Values& initialValues,
               const PARAMS& params);
  void setInlierCostThresholds(const double inth);
  const Vector& getInlierCostThresholds();
  void setInlierCostThresholdsAtProbability(const double alpha);
  void setWeights(const Vector w);
  const Vector& getWeights();
  gtsam::Values optimize();
};

@@ -873,7 +869,7 @@ template <T = {gtsam::Point2, gtsam::StereoPoint2, gtsam::Point3, gtsam::Rot2,
                gtsam::PinholeCamera<gtsam::Cal3Unified>,
                gtsam::imuBias::ConstantBias}>
virtual class NonlinearEquality2 : gtsam::NoiseModelFactor {
  NonlinearEquality2(Key key1, Key key2, double mu = 1e4);
  NonlinearEquality2(gtsam::Key key1, gtsam::Key key2, double mu = 1e4);
  gtsam::Vector evaluateError(const T& x1, const T& x2);
};

@@ -73,6 +73,7 @@ set(interface_files
    ${GTSAM_SOURCE_DIR}/gtsam/geometry/geometry.i
    ${GTSAM_SOURCE_DIR}/gtsam/linear/linear.i
    ${GTSAM_SOURCE_DIR}/gtsam/nonlinear/nonlinear.i
    ${GTSAM_SOURCE_DIR}/gtsam/nonlinear/custom.i
    ${GTSAM_SOURCE_DIR}/gtsam/symbolic/symbolic.i
    ${GTSAM_SOURCE_DIR}/gtsam/sam/sam.i
    ${GTSAM_SOURCE_DIR}/gtsam/slam/slam.i

@@ -61,6 +61,7 @@ set(interface_headers
    ${PROJECT_SOURCE_DIR}/gtsam/geometry/geometry.i
    ${PROJECT_SOURCE_DIR}/gtsam/linear/linear.i
    ${PROJECT_SOURCE_DIR}/gtsam/nonlinear/nonlinear.i
    ${PROJECT_SOURCE_DIR}/gtsam/nonlinear/custom.i
    ${PROJECT_SOURCE_DIR}/gtsam/symbolic/symbolic.i
    ${PROJECT_SOURCE_DIR}/gtsam/sam/sam.i
    ${PROJECT_SOURCE_DIR}/gtsam/slam/slam.i

@@ -0,0 +1,12 @@
/* Please refer to:
 * https://pybind11.readthedocs.io/en/stable/advanced/cast/stl.html
 * These are required to save one copy operation on Python calls.
 *
 * NOTES
 * =================
 *
 * `PYBIND11_MAKE_OPAQUE` will mark the type as "opaque" for the pybind11
 * automatic STL binding, such that the raw objects can be accessed in Python.
 * Without this they will be automatically converted to a Python object, and all
 * mutations on Python side will not be reflected on C++.
 */

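The observable effect of the opaque binding from the Python side, as a small sketch (the prior factor and key 7 are hypothetical; the container name gtsam.KeyVector is assumed):

```python
import gtsam

noise = gtsam.noiseModel.Unit.Create(2)
factor = gtsam.PriorFactorPoint2(7, gtsam.Point2(0.0, 0.0), noise)

keys = factor.keys()  # the wrapped KeyVector object itself, not an auto-converted Python list
print(type(keys))     # the opaque gtsam.KeyVector type (exact repr depends on the build)
print(list(keys))     # [7] -- still iterable and indexable from Python
```
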
@@ -9,4 +9,4 @@
 * automatic STL binding, such that the raw objects can be accessed in Python.
 * Without this they will be automatically converted to a Python object, and all
 * mutations on Python side will not be reflected on C++.
 */
 */

@@ -0,0 +1,12 @@
/* Please refer to:
 * https://pybind11.readthedocs.io/en/stable/advanced/cast/stl.html
 * These are required to save one copy operation on Python calls.
 *
 * NOTES
 * =================
 *
 * `py::bind_vector` and similar machinery gives the std container a Python-like
 * interface, but without the `<pybind11/stl.h>` copying mechanism. Combined
 * with `PYBIND11_MAKE_OPAQUE` this allows the types to be modified with Python,
 * and saves one copy operation.
 */

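A short sketch of the resulting list-like interface, assuming gtsam.KeyVector is one of the containers bound this way:

```python
import gtsam

keys = gtsam.KeyVector()  # backed directly by a C++ std::vector of keys
keys.append(1)            # in-place mutation, no per-call Python <-> C++ copy
keys.append(2)
print(len(keys), keys[0], keys[1])

keys2 = gtsam.KeyVector([3, 4, 5])  # can also be constructed from a Python iterable
print(list(keys2))
```
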
@@ -9,4 +9,4 @@
 * interface, but without the `<pybind11/stl.h>` copying mechanism. Combined
 * with `PYBIND11_MAKE_OPAQUE` this allows the types to be modified with Python,
 * and saves one copy operation.
 */
 */

@@ -15,10 +15,12 @@ from __future__ import print_function
import unittest

import gtsam
from gtsam import (DoglegOptimizer, DoglegParams, DummyPreconditionerParameters,
                   GaussNewtonOptimizer, GaussNewtonParams, GncLMParams, GncLMOptimizer,
                   LevenbergMarquardtOptimizer, LevenbergMarquardtParams, NonlinearFactorGraph,
                   Ordering, PCGSolverParameters, Point2, PriorFactorPoint2, Values)
from gtsam import (
    DoglegOptimizer, DoglegParams, DummyPreconditionerParameters, GaussNewtonOptimizer,
    GaussNewtonParams, GncLMParams, GncLossType, GncLMOptimizer, LevenbergMarquardtOptimizer,
    LevenbergMarquardtParams, NonlinearFactorGraph, Ordering, PCGSolverParameters, Point2,
    PriorFactorPoint2, Values
)
from gtsam.utils.test_case import GtsamTestCase

KEY1 = 1

@@ -27,7 +29,6 @@ KEY2 = 2

class TestScenario(GtsamTestCase):
    """Do trivial test with three optimizer variants."""

    def setUp(self):
        """Set up the optimization problem and ordering"""
        # create graph

@@ -83,16 +84,83 @@ class TestScenario(GtsamTestCase):
        actual = GncLMOptimizer(self.fg, self.initial_values, gncParams).optimize()
        self.assertAlmostEqual(0, self.fg.error(actual))

    def test_gnc_params(self):
        base_params = LevenbergMarquardtParams()
        # Test base params
        for base_max_iters in (50, 100):
            base_params.setMaxIterations(base_max_iters)
            params = GncLMParams(base_params)
            self.assertEqual(params.baseOptimizerParams.getMaxIterations(), base_max_iters)

        # Test printing
        params_str = str(params)
        for s in (
                "lossType",
                "maxIterations",
                "muStep",
                "relativeCostTol",
                "weightsTol",
                "verbosity",
        ):
            self.assertTrue(s in params_str)

        # Test each parameter
        for loss_type in (GncLossType.TLS, GncLossType.GM):
            params.setLossType(loss_type)  # Default is TLS
            self.assertEqual(params.lossType, loss_type)
        for max_iter in (1, 10, 100):
            params.setMaxIterations(max_iter)
            self.assertEqual(params.maxIterations, max_iter)
        for mu_step in (1.1, 1.2, 1.5):
            params.setMuStep(mu_step)
            self.assertEqual(params.muStep, mu_step)
        for rel_cost_tol in (1e-5, 1e-6, 1e-7):
            params.setRelativeCostTol(rel_cost_tol)
            self.assertEqual(params.relativeCostTol, rel_cost_tol)
        for weights_tol in (1e-4, 1e-3, 1e-2):
            params.setWeightsTol(weights_tol)
            self.assertEqual(params.weightsTol, weights_tol)
        for i in (0, 1, 2):
            verb = GncLMParams.Verbosity(i)
            params.setVerbosityGNC(verb)
            self.assertEqual(params.verbosity, verb)
        for inl in ([], [10], [0, 100]):
            params.setKnownInliers(inl)
            self.assertEqual(params.knownInliers, inl)
        params.knownInliers = []
        for out in ([], [1], [0, 10]):
            params.setKnownInliers(out)
            self.assertEqual(params.knownInliers, out)
        params.knownInliers = []

        # Test optimizer params
        optimizer = GncLMOptimizer(self.fg, self.initial_values, params)
        for ict_factor in (0.9, 1.1):
            new_ict = ict_factor * optimizer.getInlierCostThresholds()
            optimizer.setInlierCostThresholds(new_ict)
            self.assertAlmostEqual(optimizer.getInlierCostThresholds(), new_ict)
        for w_factor in (0.8, 0.9):
            new_weights = w_factor * optimizer.getWeights()
            optimizer.setWeights(new_weights)
            self.assertAlmostEqual(optimizer.getWeights(), new_weights)
        optimizer.setInlierCostThresholdsAtProbability(0.9)
        w1 = optimizer.getInlierCostThresholds()
        optimizer.setInlierCostThresholdsAtProbability(0.8)
        w2 = optimizer.getInlierCostThresholds()
        self.assertLess(w2, w1)

    def test_iteration_hook(self):
        # set up iteration hook to track some testable values
        iteration_count = 0
        final_error = 0
        final_values = None

        def iteration_hook(iter, error_before, error_after):
            nonlocal iteration_count, final_error, final_values
            iteration_count = iter
            final_error = error_after
            final_values = optimizer.values()

        # optimize
        params = LevenbergMarquardtParams.CeresDefaults()
        params.setOrdering(self.ordering)

@@ -104,5 +172,6 @@ class TestScenario(GtsamTestCase):
        self.assertEqual(self.fg.error(actual), final_error)
        self.assertEqual(optimizer.iterations(), iteration_count)


if __name__ == "__main__":
    unittest.main()

@@ -567,7 +567,7 @@ TEST(GncOptimizer, optimizeWithKnownInliers) {
  Values initial;
  initial.insert(X(1), p0);

  std::vector<size_t> knownInliers;
  GncParams<GaussNewtonParams>::IndexVector knownInliers;
  knownInliers.push_back(0);
  knownInliers.push_back(1);
  knownInliers.push_back(2);

@@ -644,7 +644,7 @@ TEST(GncOptimizer, barcsq) {
  Values initial;
  initial.insert(X(1), p0);

  std::vector<size_t> knownInliers;
  GncParams<GaussNewtonParams>::IndexVector knownInliers;
  knownInliers.push_back(0);
  knownInliers.push_back(1);
  knownInliers.push_back(2);

@@ -691,7 +691,7 @@ TEST(GncOptimizer, setInlierCostThresholds) {
  Values initial;
  initial.insert(X(1), p0);

  std::vector<size_t> knownInliers;
  GncParams<GaussNewtonParams>::IndexVector knownInliers;
  knownInliers.push_back(0);
  knownInliers.push_back(1);
  knownInliers.push_back(2);

@@ -763,7 +763,7 @@ TEST(GncOptimizer, optimizeSmallPoseGraph) {
  // GNC
  // Note: in difficult instances, we set the odometry measurements to be
  // inliers, but this problem is simple enought to succeed even without that
  // assumption std::vector<size_t> knownInliers;
  // assumption GncParams<GaussNewtonParams>::IndexVector knownInliers;
  GncParams<GaussNewtonParams> gncParams;
  auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(*graph, *initial,
                                                        gncParams);

@@ -784,12 +784,12 @@ TEST(GncOptimizer, knownInliersAndOutliers) {
  // nonconvexity with known inliers and known outliers (check early stopping
  // when all measurements are known to be inliers or outliers)
  {
    std::vector<size_t> knownInliers;
    GncParams<GaussNewtonParams>::IndexVector knownInliers;
    knownInliers.push_back(0);
    knownInliers.push_back(1);
    knownInliers.push_back(2);

    std::vector<size_t> knownOutliers;
    GncParams<GaussNewtonParams>::IndexVector knownOutliers;
    knownOutliers.push_back(3);

    GncParams<GaussNewtonParams> gncParams;

@@ -813,11 +813,11 @@ TEST(GncOptimizer, knownInliersAndOutliers) {

  // nonconvexity with known inliers and known outliers
  {
    std::vector<size_t> knownInliers;
    GncParams<GaussNewtonParams>::IndexVector knownInliers;
    knownInliers.push_back(2);
    knownInliers.push_back(0);

    std::vector<size_t> knownOutliers;
    GncParams<GaussNewtonParams>::IndexVector knownOutliers;
    knownOutliers.push_back(3);

    GncParams<GaussNewtonParams> gncParams;

@@ -841,7 +841,7 @@ TEST(GncOptimizer, knownInliersAndOutliers) {

  // only known outliers
  {
    std::vector<size_t> knownOutliers;
    GncParams<GaussNewtonParams>::IndexVector knownOutliers;
    knownOutliers.push_back(3);

    GncParams<GaussNewtonParams> gncParams;

@@ -916,11 +916,11 @@ TEST(GncOptimizer, setWeights) {
  // initialize weights and also set known inliers/outliers
  {
    GncParams<GaussNewtonParams> gncParams;
    std::vector<size_t> knownInliers;
    GncParams<GaussNewtonParams>::IndexVector knownInliers;
    knownInliers.push_back(2);
    knownInliers.push_back(0);

    std::vector<size_t> knownOutliers;
    GncParams<GaussNewtonParams>::IndexVector knownOutliers;
    knownOutliers.push_back(3);
    gncParams.setKnownInliers(knownInliers);
    gncParams.setKnownOutliers(knownOutliers);