diff --git a/gtsam/linear/LossFunctions.h b/gtsam/linear/LossFunctions.h
index 1699b833f..d9cfc1f3c 100644
--- a/gtsam/linear/LossFunctions.h
+++ b/gtsam/linear/LossFunctions.h
@@ -54,23 +54,31 @@ namespace noiseModel {
// clang-format on
namespace mEstimator {
-//---------------------------------------------------------------------------------------
-
+/**
+ * Pure virtual class for all robust error function classes.
+ *
+ * It provides the machinery for block vs scalar reweighting strategies, in
+ * addition to defining the interface of derived classes.
+ */
class GTSAM_EXPORT Base {
public:
+ /** the rows can be weighted independently according to the error
+ * or uniformly with the norm of the right hand side */
enum ReweightScheme { Scalar, Block };
typedef boost::shared_ptr<Base> shared_ptr;
protected:
- /** the rows can be weighted independently according to the error
- * or uniformly with the norm of the right hand side */
+ /// Strategy for reweighting \sa ReweightScheme
ReweightScheme reweight_;
public:
Base(const ReweightScheme reweight = Block) : reweight_(reweight) {}
virtual ~Base() {}
- /*
+ /// Returns the reweight scheme, as explained in ReweightScheme
+ ReweightScheme reweightScheme() const { return reweight_; }
+
+ /**
* This method is responsible for returning the total penalty for a given
* amount of error. For example, this method is responsible for implementing
* the quadratic function for an L2 penalty, the absolute value function for
@@ -80,16 +88,20 @@ class GTSAM_EXPORT Base {
* error vector, then it prevents implementations of asymmetric loss
* functions. It would be better for this function to accept the vector and
* internally call the norm if necessary.
+ *
+ * This returns \rho(x) in \ref mEstimator
*/
- virtual double loss(double distance) const { return 0; };
+ virtual double loss(double distance) const { return 0; }
- /*
+ /**
* This method is responsible for returning the weight function for a given
* amount of error. The weight function is related to the analytic derivative
* of the loss function. See
* https://members.loria.fr/MOBerger/Enseignement/Master2/Documents/ZhangIVC-97-01.pdf
* for details. This method is required when optimizing cost functions with
* robust penalties using iteratively re-weighted least squares.
+ *
+ * This returns w(x) in \ref mEstimator
*/
virtual double weight(double distance) const = 0;
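The loss()/weight() pair documented above obeys w(x) = \phi(x)/x with \phi(x) = d\rho/dx, which is exactly what iteratively re-weighted least squares relies on. A minimal sketch (not part of this patch) of checking that contract, together with the new reweightScheme() accessor, through the Base interface; it assumes a GTSAM 4.x build, and Fair plus the tolerance are arbitrary choices:

```cpp
#include <gtsam/linear/LossFunctions.h>
#include <cassert>
#include <cmath>

int main() {
  using namespace gtsam::noiseModel::mEstimator;
  Base::shared_ptr rho = Fair::Create(1.3998);   // any concrete m-estimator
  assert(rho->reweightScheme() == Base::Block);  // default scheme, new accessor

  const double x = 2.5, h = 1e-6;
  // Central-difference approximation of the loss derivative \phi(x) = \rho'(x).
  const double drho = (rho->loss(x + h) - rho->loss(x - h)) / (2 * h);
  // The IRLS weight should match \phi(x)/x.
  assert(std::abs(rho->weight(x) - drho / x) < 1e-6);
  return 0;
}
```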
@@ -124,7 +136,15 @@ class GTSAM_EXPORT Base {
}
};
-/// Null class should behave as Gaussian
+/** "Null" robust loss function, equivalent to a Gaussian pdf noise model, or
+ * plain least-squares (non-robust).
+ *
+ * This model has no additional parameters.
+ *
+ * - Loss \rho(x) = 0.5 x²
+ * - Derivative \phi(x) = x
+ * - Weight w(x) = \phi(x)/x = 1
+ */
class GTSAM_EXPORT Null : public Base {
public:
typedef boost::shared_ptr<Null> shared_ptr;
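Since Null's weight is identically 1, wrapping a Gaussian noise model with it reproduces plain least squares. A short usage sketch (not part of this patch), assuming the usual Robust/Isotropic noise-model API from NoiseModel.h:

```cpp
#include <gtsam/linear/NoiseModel.h>

void nullExample() {
  using namespace gtsam::noiseModel;
  // Robust wrapper around an isotropic Gaussian; with Null it changes nothing.
  auto robust = Robust::Create(mEstimator::Null::Create(),
                               Isotropic::Sigma(3, 0.1));
  // w(x) = 1 for any residual magnitude.
  const double w = mEstimator::Null::Create()->weight(42.0);  // == 1.0
  (void)robust;
  (void)w;
}
```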
@@ -146,7 +166,14 @@ class GTSAM_EXPORT Null : public Base {
}
};
-/// Fair implements the "Fair" robust error model (Zhang97ivc)
+/** Implementation of the "Fair" robust error model (Zhang97ivc)
+ *
+ * This model has a scalar parameter "c".
+ *
+ * - Loss \rho(x) = c² (|x|/c - log(1+|x|/c))
+ * - Derivative \phi(x) = x/(1+|x|/c)
+ * - Weight w(x) = \phi(x)/x = 1/(1+|x|/c)
+ */
class GTSAM_EXPORT Fair : public Base {
protected:
double c_;
@@ -160,6 +187,7 @@ class GTSAM_EXPORT Fair : public Base {
void print(const std::string &s) const override;
bool equals(const Base &expected, double tol = 1e-8) const override;
static shared_ptr Create(double c, const ReweightScheme reweight = Block);
+ double modelParameter() const { return c_; }
private:
/** Serialization function */
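A quick sketch (not part of this patch) of the new modelParameter() accessor and the Fair weight formula quoted above; the constant 1.3998 is just a common tuning value:

```cpp
#include <gtsam/linear/LossFunctions.h>
#include <cassert>
#include <cmath>

void fairExample() {
  using gtsam::noiseModel::mEstimator::Fair;
  const double c = 1.3998;
  auto fair = Fair::Create(c);
  assert(fair->modelParameter() == c);   // accessor added in this change
  // w(x) = 1/(1 + |x|/c), so at x = c the weight drops to 0.5.
  assert(std::abs(fair->weight(c) - 0.5) < 1e-12);
}
```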
@@ -171,7 +199,14 @@ class GTSAM_EXPORT Fair : public Base {
}
};
-/// Huber implements the "Huber" robust error model (Zhang97ivc)
+/** The "Huber" robust error model (Zhang97ivc).
+ *
+ * This model has a scalar parameter "k".
+ *
+ * - Loss \rho(x) = 0.5 x² if |x|<k, 0.5 k² + k(|x|-k) otherwise
+ * - Derivative \phi(x) = x if |x|<k, k sgn(x) otherwise
+ * - Weight w(x) = \phi(x)/x = 1 if |x|<k, k/|x| otherwise
+ */
class GTSAM_EXPORT Huber : public Base {
protected:
double k_;
public:
typedef boost::shared_ptr<Huber> shared_ptr;
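A small sketch (not part of this patch) of the Huber behaviour described above: residuals below k keep unit weight, larger ones are downweighted as k/|x|; 1.345 is the usual 95%-efficiency constant:

```cpp
#include <gtsam/linear/LossFunctions.h>
#include <cassert>
#include <cmath>

void huberExample() {
  using gtsam::noiseModel::mEstimator::Huber;
  auto huber = Huber::Create(1.345);
  assert(huber->weight(0.5) == 1.0);                       // |x| < k: unweighted
  assert(std::abs(huber->weight(10.0) - 0.1345) < 1e-12);  // k/|x|
}
```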
@@ -295,6 +359,7 @@ class GTSAM_EXPORT GemanMcClure : public Base {
void print(const std::string &s) const override;
bool equals(const Base &expected, double tol = 1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block);
+ double modelParameter() const { return c_; }
protected:
double c_;
@@ -309,11 +374,18 @@ class GTSAM_EXPORT GemanMcClure : public Base {
}
};
-/// DCS implements the Dynamic Covariance Scaling robust error model
-/// from the paper Robust Map Optimization (Agarwal13icra).
-///
-/// Under the special condition of the parameter c == 1.0 and not
-/// forcing the output weight s <= 1.0, DCS is similar to Geman-McClure.
+/** DCS implements the Dynamic Covariance Scaling robust error model
+ * from the paper Robust Map Optimization (Agarwal13icra).
+ *
+ * Under the special condition of the parameter c == 1.0 and not
+ * forcing the output weight s <= 1.0, DCS is similar to Geman-McClure.
+ *
+ * This model has a scalar parameter "c" (with "units" of squared error).
+ *
+ * - Loss \rho(x) = (c²x² + cx⁴)/(x²+c)² (for any "x")
+ * - Derivative \phi(x) = 2c²x/(x²+c)²
+ * - Weight w(x) = \phi(x)/x = 2c²/(x²+c)² if x²>c, 1 otherwise
+ */
class GTSAM_EXPORT DCS : public Base {
public:
typedef boost::shared_ptr<DCS> shared_ptr;
@@ -325,6 +397,7 @@ class GTSAM_EXPORT DCS : public Base {
void print(const std::string &s) const override;
bool equals(const Base &expected, double tol = 1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block);
+ double modelParameter() const { return c_; }
protected:
double c_;
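A sketch (not part of this patch) of the behaviour implied by the DCS weight expression above: residuals with x² <= c keep unit weight, larger ones get scaled down:

```cpp
#include <gtsam/linear/LossFunctions.h>
#include <cassert>

void dcsExample() {
  using gtsam::noiseModel::mEstimator::DCS;
  auto dcs = DCS::Create(1.0);           // c, in "units" of squared error
  assert(dcs->modelParameter() == 1.0);  // accessor added in this change
  assert(dcs->weight(0.5) == 1.0);       // x² = 0.25 <= c: untouched
  assert(dcs->weight(3.0) < 1.0);        // x² = 9 > c: downweighted
}
```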
@@ -339,12 +412,19 @@ class GTSAM_EXPORT DCS : public Base {
}
};
-/// L2WithDeadZone implements a standard L2 penalty, but with a dead zone of
-/// width 2*k, centered at the origin. The resulting penalty within the dead
-/// zone is always zero, and grows quadratically outside the dead zone. In this
-/// sense, the L2WithDeadZone penalty is "robust to inliers", rather than being
-/// robust to outliers. This penalty can be used to create barrier functions in
-/// a general way.
+/** L2WithDeadZone implements a standard L2 penalty, but with a dead zone of
+ * width 2*k, centered at the origin. The resulting penalty within the dead
+ * zone is always zero, and grows quadratically outside the dead zone. In this
+ * sense, the L2WithDeadZone penalty is "robust to inliers", rather than being
+ * robust to outliers. This penalty can be used to create barrier functions in
+ * a general way.
+ *
+ * This model has a scalar parameter "k".
+ *
+ * - Loss \rho(x) = 0 if |x|<k, 0.5(|x|-k)² otherwise
+ * - Derivative \phi(x) = 0 if |x|<k, (-k+x) if x>k, (k+x) if x<-k
+ * - Weight w(x) = \phi(x)/x = 0 if |x|<k, (-k+x)/x if x>k, (k+x)/x if x<-k
+ */
class GTSAM_EXPORT L2WithDeadZone : public Base {
protected:
double k_;
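A sketch (not part of this patch) matching the dead-zone formulas above: zero penalty and zero weight inside [-k, k], quadratic growth outside:

```cpp
#include <gtsam/linear/LossFunctions.h>
#include <cassert>
#include <cmath>

void deadZoneExample() {
  using gtsam::noiseModel::mEstimator::L2WithDeadZone;
  auto dz = L2WithDeadZone::Create(1.0);          // half-width k of the dead zone
  assert(dz->loss(0.5) == 0.0);                   // inside the dead zone
  assert(std::abs(dz->loss(3.0) - 2.0) < 1e-12);  // 0.5 (|x|-k)² = 2
  assert(dz->weight(0.5) == 0.0);                 // zero IRLS weight inside
}
```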
@@ -358,6 +438,7 @@ class GTSAM_EXPORT L2WithDeadZone : public Base {
void print(const std::string &s) const override;
bool equals(const Base &expected, double tol = 1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block);
+ double modelParameter() const { return k_; }
private:
/** Serialization function */