Merge pull request #1175 from borglab/feature/add-missing-getters

noise models: Add getters for all model parameters
release/4.3a0
Frank Dellaert 2022-04-22 11:03:13 -04:00 committed by GitHub
commit b1d936e567
1 changed file with 116 additions and 35 deletions


@ -54,23 +54,31 @@ namespace noiseModel {
// clang-format on
namespace mEstimator {
//---------------------------------------------------------------------------------------
/**
* Pure virtual class for all robust error function classes.
*
* It provides the machinery for block vs scalar reweighting strategies, in
* addition to defining the interface of derived classes.
*/
class GTSAM_EXPORT Base {
public:
/** the rows can be weighted independently according to the error
* or uniformly with the norm of the right hand side */
enum ReweightScheme { Scalar, Block };
typedef boost::shared_ptr<Base> shared_ptr;
protected:
/// Strategy for reweighting \sa ReweightScheme
ReweightScheme reweight_;
public:
Base(const ReweightScheme reweight = Block) : reweight_(reweight) {}
virtual ~Base() {}
/// Returns the reweight scheme, as explained in ReweightScheme
ReweightScheme reweightScheme() const { return reweight_; }
/**
* This method is responsible for returning the total penalty for a given
* amount of error. For example, this method is responsible for implementing
* the quadratic function for an L2 penalty, the absolute value function for
@ -80,16 +88,20 @@ class GTSAM_EXPORT Base {
* error vector, then it prevents implementations of asymmetric loss
* functions. It would be better for this function to accept the vector and
* internally call the norm if necessary.
*
* This returns \rho(x) in \ref mEstimator
*/
virtual double loss(double distance) const { return 0; }
/**
* This method is responsible for returning the weight function for a given
* amount of error. The weight function is related to the analytic derivative
* of the loss function. See
* https://members.loria.fr/MOBerger/Enseignement/Master2/Documents/ZhangIVC-97-01.pdf
* for details. This method is required when optimizing cost functions with
* robust penalties using iteratively re-weighted least squares.
*
* This returns w(x) in \ref mEstimator
*/
virtual double weight(double distance) const = 0;
@ -124,7 +136,15 @@ class GTSAM_EXPORT Base {
}
};
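// ---------------------------------------------------------------------------
// Illustrative usage sketch (user-side code, not part of this header or of
// this PR): how the Base interface above is typically consumed. loss() returns
// the robust penalty \rho(x), weight() the IRLS weight w(x), and the new
// reweightScheme() getter exposes the Scalar/Block strategy. The function name
// is hypothetical, and the public include <gtsam/linear/NoiseModel.h> is
// assumed; Huber is one of the concrete subclasses defined further below.
#include <gtsam/linear/NoiseModel.h>

void baseInterfaceSketch() {
  using namespace gtsam::noiseModel::mEstimator;
  Base::shared_ptr rho = Huber::Create(1.345);   // any concrete m-estimator
  const double e = 2.0;                          // scalar error ("distance")
  const double penalty = rho->loss(e);           // \rho(e)
  const double w = rho->weight(e);               // w(e), used when reweighting
  const bool block = (rho->reweightScheme() == Base::Block);  // default scheme
  (void)penalty; (void)w; (void)block;
}
// ---------------------------------------------------------------------------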
/** "Null" robust loss function, equivalent to a Gaussian pdf noise model, or
* plain least-squares (non-robust).
*
* This model has no additional parameters.
*
* - Loss \rho(x) = 0.5 x²
* - Derivative \phi(x) = x
* - Weight w(x) = \phi(x)/x = 1
*/
class GTSAM_EXPORT Null : public Base {
public:
typedef boost::shared_ptr<Null> shared_ptr;
@ -146,7 +166,14 @@ class GTSAM_EXPORT Null : public Base {
}
};
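// Illustrative usage sketch (user-side code, not part of this header): Null is
// the non-robust baseline, so every error keeps unit weight. The function name
// is hypothetical.
#include <gtsam/linear/NoiseModel.h>
#include <cassert>

void nullSketch() {
  using gtsam::noiseModel::mEstimator::Null;
  const Null rho;                  // no model parameters
  assert(rho.weight(3.0) == 1.0);  // w(x) = 1 for any x
  const double l = rho.loss(3.0);  // the plain quadratic penalty documented above
  (void)l;
}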
/** Implementation of the "Fair" robust error model (Zhang97ivc)
*
* This model has a scalar parameter "c".
*
* - Loss \rho(x) = c² (|x|/c - log(1+|x|/c))
* - Derivative \phi(x) = x/(1+|x|/c)
* - Weight w(x) = \phi(x)/x = 1/(1+|x|/c)
*/
class GTSAM_EXPORT Fair : public Base {
protected:
double c_;
@ -160,6 +187,7 @@ class GTSAM_EXPORT Fair : public Base {
void print(const std::string &s) const override;
bool equals(const Base &expected, double tol = 1e-8) const override;
static shared_ptr Create(double c, const ReweightScheme reweight = Block);
double modelParameter() const { return c_; }
private:
/** Serialization function */
@ -171,7 +199,14 @@ class GTSAM_EXPORT Fair : public Base {
}
};
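// Illustrative usage sketch (user-side code, not part of this header): the
// modelParameter() getter introduced in this PR simply echoes the constant
// passed to Create(). The function name and the value 1.3998 are example
// choices.
#include <gtsam/linear/NoiseModel.h>
#include <cassert>

void fairGetterSketch() {
  using gtsam::noiseModel::mEstimator::Fair;
  auto fair = Fair::Create(1.3998);
  assert(fair->modelParameter() == 1.3998);  // returns the stored c
  assert(fair->weight(0.0) == 1.0);          // w(0) = 1/(1 + 0/c) = 1
}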
/** The "Huber" robust error model (Zhang97ivc).
*
* This model has a scalar parameter "k".
*
* - Loss \rho(x) = 0.5 x² if |x|<k, 0.5 k² + k(|x|-k) otherwise
* - Derivative \phi(x) = x if |x|<k, k sgn(x) otherwise
* - Weight w(x) = \phi(x)/x = 1 if |x|<k, k/|x| otherwise
*/
class GTSAM_EXPORT Huber : public Base {
protected:
double k_;
@ -185,6 +220,7 @@ class GTSAM_EXPORT Huber : public Base {
void print(const std::string &s) const override;
bool equals(const Base &expected, double tol = 1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block);
double modelParameter() const { return k_; }
private:
/** Serialization function */
@ -196,12 +232,19 @@ class GTSAM_EXPORT Huber : public Base {
}
};
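// Illustrative usage sketch (user-side code, not part of this header): the
// usual way Huber is deployed, wrapped together with a Gaussian base model
// into a robust noise model for a factor. The helper name and the constants
// (k = 1.345, 3-dimensional sigma = 0.1) are example choices.
#include <gtsam/linear/NoiseModel.h>

gtsam::SharedNoiseModel makeRobustHuberSketch() {
  using namespace gtsam::noiseModel;
  auto huber = mEstimator::Huber::Create(1.345);  // weight 1 for |x| <= k, k/|x| beyond
  auto gaussian = Isotropic::Sigma(3, 0.1);       // underlying Gaussian model
  return Robust::Create(huber, gaussian);
}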
/** Implementation of the "Cauchy" robust error model (Lee2013IROS).
* Contributed by:
* Dipl.-Inform. Jan Oberlaender (M.Sc.), FZI Research Center for
* Information Technology, Karlsruhe, Germany.
* oberlaender@fzi.de
* Thanks Jan!
*
* This model has a scalar parameter "k".
*
* - Loss \rho(x) = 0.5 k² log(1+x²/k²)
* - Derivative \phi(x) = (k²x)/(x²+k²)
* - Weight w(x) = \phi(x)/x = k²/(x²+k²)
*/
class GTSAM_EXPORT Cauchy : public Base {
protected:
double k_, ksquared_;
@ -215,6 +258,7 @@ class GTSAM_EXPORT Cauchy : public Base {
void print(const std::string &s) const override;
bool equals(const Base &expected, double tol = 1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block);
double modelParameter() const { return k_; }
private:
/** Serialization function */
@ -227,7 +271,14 @@ class GTSAM_EXPORT Cauchy : public Base {
}
};
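// Illustrative usage sketch (user-side code, not part of this header): a
// numeric check of the Cauchy weight w(x) = k²/(x²+k²), plus the new
// modelParameter() getter. Function name and constants are example choices.
#include <gtsam/linear/NoiseModel.h>
#include <cassert>
#include <cmath>

void cauchySketch() {
  using gtsam::noiseModel::mEstimator::Cauchy;
  const double k = 0.1;
  auto rho = Cauchy::Create(k);
  const double x = 0.3;
  const double expected = (k * k) / (x * x + k * k);  // 0.01 / 0.10 = 0.1
  assert(std::abs(rho->weight(x) - expected) < 1e-12);
  assert(rho->modelParameter() == k);                 // getter echoes k
}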
/** Implementation of the "Tukey" robust error model (Zhang97ivc).
*
* This model has a scalar parameter "c".
*
* - Loss \rho(x) = c² (1 - (1-x²/c²)³)/6 if |x|<c, c²/6 otherwise
* - Derivative \phi(x) = x(1-x²/c²)² if |x|<c, 0 otherwise
* - Weight w(x) = \phi(x)/x = (1-x²/c²)² if |x|<c, 0 otherwise
*/
class GTSAM_EXPORT Tukey : public Base {
protected:
double c_, csquared_;
@ -241,6 +292,7 @@ class GTSAM_EXPORT Tukey : public Base {
void print(const std::string &s) const override;
bool equals(const Base &expected, double tol = 1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block);
double modelParameter() const { return c_; }
private:
/** Serialization function */
@ -252,7 +304,14 @@ class GTSAM_EXPORT Tukey : public Base {
}
};
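// Illustrative usage sketch (user-side code, not part of this header): Tukey
// is a "hard" redescending estimator, so the weight is exactly zero once |x|
// exceeds c and gross outliers are rejected outright. The function name and
// the constant 4.6851 are example choices.
#include <gtsam/linear/NoiseModel.h>
#include <cassert>

void tukeySketch() {
  using gtsam::noiseModel::mEstimator::Tukey;
  auto rho = Tukey::Create(4.6851);
  assert(rho->weight(0.0) == 1.0);   // (1 - 0)² = 1 at the origin
  assert(rho->weight(10.0) == 0.0);  // |x| > c  =>  zero weight
}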
/** Implementation of the "Welsch" robust error model (Zhang97ivc).
*
* This model has a scalar parameter "c".
*
* - Loss \rho(x) = -0.5 c² (exp(-x²/c²) - 1)
* - Derivative \phi(x) = x exp(-x²/c²)
* - Weight w(x) = \phi(x)/x = exp(-x²/c²)
*/
class GTSAM_EXPORT Welsch : public Base {
protected:
double c_, csquared_;
@ -266,6 +325,7 @@ class GTSAM_EXPORT Welsch : public Base {
void print(const std::string &s) const override;
bool equals(const Base &expected, double tol = 1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block);
double modelParameter() const { return c_; }
private:
/** Serialization function */
@ -278,12 +338,16 @@ class GTSAM_EXPORT Welsch : public Base {
}
};
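// Illustrative usage sketch (user-side code, not part of this header): unlike
// Tukey, the Welsch weight exp(-x²/c²) decays smoothly and never reaches
// exactly zero. Function name and constants are example choices.
#include <gtsam/linear/NoiseModel.h>
#include <cassert>
#include <cmath>

void welschSketch() {
  using gtsam::noiseModel::mEstimator::Welsch;
  const double c = 2.9846;
  auto rho = Welsch::Create(c);
  const double x = 10.0;
  assert(std::abs(rho->weight(x) - std::exp(-x * x / (c * c))) < 1e-12);
  assert(rho->weight(x) > 0.0);  // heavily down-weighted, but never exactly 0
}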
/** Implementation of the "Geman-McClure" robust error model (Zhang97ivc).
*
* Note that Geman-McClure weight function uses the parameter c == 1.0,
* but here it's allowed to use different values, so we actually have
* the generalized Geman-McClure from (Agarwal15phd).
*
* - Loss \rho(x) = 0.5 (c²x²)/(c²+x²)
* - Derivative \phi(x) = xc⁴/(c²+x²)²
* - Weight w(x) = \phi(x)/x = c⁴/(c²+x²)²
*/
class GTSAM_EXPORT GemanMcClure : public Base {
public:
typedef boost::shared_ptr<GemanMcClure> shared_ptr;
@ -295,6 +359,7 @@ class GTSAM_EXPORT GemanMcClure : public Base {
void print(const std::string &s) const override;
bool equals(const Base &expected, double tol = 1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block);
double modelParameter() const { return c_; }
protected:
double c_;
@ -309,11 +374,18 @@ class GTSAM_EXPORT GemanMcClure : public Base {
}
};
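// Illustrative usage sketch (user-side code, not part of this header): with
// the classical c = 1 the Geman-McClure weight c⁴/(c²+x²)² reduces to
// 1/(1+x²)². Function name and values are example choices.
#include <gtsam/linear/NoiseModel.h>
#include <cassert>
#include <cmath>

void gemanMcClureSketch() {
  using gtsam::noiseModel::mEstimator::GemanMcClure;
  auto rho = GemanMcClure::Create(1.0);
  const double x = 3.0;
  const double expected = 1.0 / ((1.0 + x * x) * (1.0 + x * x));  // 1/100
  assert(std::abs(rho->weight(x) - expected) < 1e-12);
  assert(rho->weight(0.0) == 1.0);  // inliers keep full weight
}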
/** DCS implements the Dynamic Covariance Scaling robust error model
* from the paper Robust Map Optimization (Agarwal13icra).
*
* Under the special condition of the parameter c == 1.0 and not
* forcing the output weight s <= 1.0, DCS is similar to Geman-McClure.
*
* This model has a scalar parameter "c" (with "units" of squared error).
*
* - Loss \rho(x) = (c²x² + cx⁴)/(x²+c)² (for any "x")
* - Derivative \phi(x) = 2c²x/(x²+c)²
* - Weight w(x) = \phi(x)/x = 2c²/(x²+c)² if x²>c, 1 otherwise
*/
class GTSAM_EXPORT DCS : public Base {
public:
typedef boost::shared_ptr<DCS> shared_ptr;
@ -325,6 +397,7 @@ class GTSAM_EXPORT DCS : public Base {
void print(const std::string &s) const override;
bool equals(const Base &expected, double tol = 1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block);
double modelParameter() const { return c_; }
protected:
double c_;
@ -339,12 +412,19 @@ class GTSAM_EXPORT DCS : public Base {
}
};
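// Illustrative usage sketch (user-side code, not part of this header): DCS
// never up-weights, the weight stays at 1 while x² <= c and only scales larger
// errors down. Function name and values are example choices.
#include <gtsam/linear/NoiseModel.h>
#include <cassert>

void dcsSketch() {
  using gtsam::noiseModel::mEstimator::DCS;
  auto rho = DCS::Create(1.0);           // c, in "units" of squared error
  assert(rho->weight(0.5) == 1.0);       // x² <= c  =>  weight stays at 1
  assert(rho->weight(3.0) < 1.0);        // x² > c   =>  down-weighted
  assert(rho->modelParameter() == 1.0);  // getter added in this PR echoes c
}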
/** L2WithDeadZone implements a standard L2 penalty, but with a dead zone of
* width 2*k, centered at the origin. The resulting penalty within the dead
* zone is always zero, and grows quadratically outside the dead zone. In this
* sense, the L2WithDeadZone penalty is "robust to inliers", rather than being
* robust to outliers. This penalty can be used to create barrier functions in
* a general way.
*
* This model has a scalar parameter "k".
*
* - Loss \rho(x) = 0 if |x|<k, 0.5(k-|x|)² otherwise
* - Derivative \phi(x) = 0 if |x|<k, (-k+x) if x>k, (k+x) if x<-k
* - Weight w(x) = \phi(x)/x = 0 if |x|<k, (-k+x)/x if x>k, (k+x)/x if x<-k
*/
class GTSAM_EXPORT L2WithDeadZone : public Base {
protected:
double k_;
@ -358,6 +438,7 @@ class GTSAM_EXPORT L2WithDeadZone : public Base {
void print(const std::string &s) const override;
bool equals(const Base &expected, double tol = 1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block);
double modelParameter() const { return k_; }
private:
/** Serialization function */