Merge branch 'develop' into feature/noisemodel_rename_functions

release/4.3a0
yetongumich 2020-04-03 14:22:26 -04:00 committed by GitHub
commit a17696298d
5 changed files with 56 additions and 48 deletions

gtsam.h

@@ -1458,7 +1458,7 @@ virtual class Null: gtsam::noiseModel::mEstimator::Base {
void serializable() const;
double weight(double error) const;
double loss(double error) const;
double residual(double error) const;
};
virtual class Fair: gtsam::noiseModel::mEstimator::Base {
@@ -1469,7 +1469,7 @@ virtual class Fair: gtsam::noiseModel::mEstimator::Base {
void serializable() const;
double weight(double error) const;
double loss(double error) const;
double residual(double error) const;
};
virtual class Huber: gtsam::noiseModel::mEstimator::Base {
@@ -1480,7 +1480,7 @@ virtual class Huber: gtsam::noiseModel::mEstimator::Base {
void serializable() const;
double weight(double error) const;
double loss(double error) const;
double residual(double error) const;
};
virtual class Cauchy: gtsam::noiseModel::mEstimator::Base {
@@ -1491,7 +1491,7 @@ virtual class Cauchy: gtsam::noiseModel::mEstimator::Base {
void serializable() const;
double weight(double error) const;
double loss(double error) const;
double residual(double error) const;
};
virtual class Tukey: gtsam::noiseModel::mEstimator::Base {
@@ -1502,7 +1502,7 @@ virtual class Tukey: gtsam::noiseModel::mEstimator::Base {
void serializable() const;
double weight(double error) const;
double loss(double error) const;
double residual(double error) const;
};
virtual class Welsch: gtsam::noiseModel::mEstimator::Base {
@@ -1513,7 +1513,7 @@ virtual class Welsch: gtsam::noiseModel::mEstimator::Base {
void serializable() const;
double weight(double error) const;
double loss(double error) const;
double residual(double error) const;
};
virtual class GemanMcClure: gtsam::noiseModel::mEstimator::Base {
@@ -1524,7 +1524,7 @@ virtual class GemanMcClure: gtsam::noiseModel::mEstimator::Base {
void serializable() const;
double weight(double error) const;
double loss(double error) const;
double residual(double error) const;
};
virtual class DCS: gtsam::noiseModel::mEstimator::Base {
@@ -1535,7 +1535,7 @@ virtual class DCS: gtsam::noiseModel::mEstimator::Base {
void serializable() const;
double weight(double error) const;
double loss(double error) const;
double residual(double error) const;
};
virtual class L2WithDeadZone: gtsam::noiseModel::mEstimator::Base {
@@ -1546,7 +1546,7 @@ virtual class L2WithDeadZone: gtsam::noiseModel::mEstimator::Base {
void serializable() const;
double weight(double error) const;
double loss(double error) const;
double residual(double error) const;
};
}///\namespace mEstimator


@@ -1008,7 +1008,14 @@ TEST(Pose3, print) {
// Generate the expected output
std::stringstream expected;
Point3 translation(1, 2, 3);
#ifdef GTSAM_TYPEDEF_POINTS_TO_VECTORS
expected << "1\n"
"2\n"
"3;\n";
#else
expected << '[' << translation.x() << ", " << translation.y() << ", " << translation.z() << "]\';";
#endif
// reset cout to the original stream
std::cout.rdbuf(oldbuf);


@@ -36,12 +36,12 @@ namespace noiseModel {
* The mEstimator namespace contains all robust error functions.
* It mirrors the exposition at
* https://members.loria.fr/MOBerger/Enseignement/Master2/Documents/ZhangIVC-97-01.pdf
* which talks about minimizing \sum \rho(r_i), where \rho is a loss function of choice.
* which talks about minimizing \sum \rho(r_i), where \rho is a residual function of choice.
*
* To illustrate, let's consider the least-squares (L2), L1, and Huber estimators as examples:
*
* Name        Symbol           Least-Squares   L1-norm   Huber
* Loss        \rho(x)          0.5*x^2         |x|       0.5*x^2 if |x|<k, 0.5*k^2 + k|x-k| otherwise
* Residual    \rho(x)          0.5*x^2         |x|       0.5*x^2 if |x|<k, 0.5*k^2 + k|x-k| otherwise
* Derivative  \phi(x)          x               sgn(x)    x if |x|<k, k sgn(x) otherwise
* Weight      w(x)=\phi(x)/x   1               1/|x|     1 if |x|<k, k/|x| otherwise
*
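To make the table concrete, here is a minimal sketch (not part of this diff) that evaluates the Huber estimator with threshold k = 5, the same values exercised in the TEST(NoiseModel, robustFunctionHuber) hunk further down; whether the rho accessor is spelled loss() or residual() depends on which side of this rename you build against.

#include <gtsam/linear/NoiseModel.h>
#include <iostream>

int main() {
  using gtsam::noiseModel::mEstimator::Huber;
  auto huber = Huber::Create(5.0);           // threshold k = 5
  std::cout << huber->weight(1.0)  << "\n";  // |x| <  k -> 1
  std::cout << huber->weight(10.0) << "\n";  // |x| >= k -> k/|x| = 0.5
  std::cout << huber->loss(1.0)    << "\n";  // 0.5*x^2 = 0.5   (residual() pre-rename)
  std::cout << huber->loss(10.0)   << "\n";  // 0.5*k^2 + k*(|x|-k) = 37.5
  return 0;
}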
@@ -75,7 +75,7 @@ class GTSAM_EXPORT Base {
* the quadratic function for an L2 penalty, the absolute value function for
* an L1 penalty, etc.
*
* TODO(mikebosse): When the loss function has as input the norm of the
* TODO(mikebosse): When the residual function has as input the norm of the
* residual vector, then it prevents implementations of asymmetric loss
* functions. It would be better for this function to accept the vector and
* internally call the norm if necessary.
@@ -85,10 +85,11 @@ class GTSAM_EXPORT Base {
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
virtual double residual(double distance) const { return loss(distance); };
#endif
/*
* This method is responsible for returning the weight function for a given
* amount of error. The weight function is related to the analytic derivative
* of the loss function. See
* of the residual function. See
* https://members.loria.fr/MOBerger/Enseignement/Master2/Documents/ZhangIVC-97-01.pdf
* for details. This method is required when optimizing cost functions with
* robust penalties using iteratively re-weighted least squares.
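As a usage note, here is a sketch (not part of this diff; the helper name makeRobustModel is hypothetical) of how the weight function enters optimization in practice: the m-estimator is wrapped together with a Gaussian noise model in a Robust noise model, and the optimizer re-weights each residual by weight(error) at every iteration, which is the iteratively re-weighted least squares scheme described above.

#include <gtsam/linear/NoiseModel.h>

gtsam::SharedNoiseModel makeRobustModel() {
  using namespace gtsam::noiseModel;
  auto gaussian = Isotropic::Sigma(3, 0.1);          // base Gaussian model (dim 3, sigma 0.1; example values)
  auto huber    = mEstimator::Huber::Create(1.345);  // robust kernel with threshold k (example value)
  // Each factor error is re-weighted by huber->weight(error) during optimization.
  return Robust::Create(huber, gaussian);
}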
@@ -126,7 +127,7 @@ class GTSAM_EXPORT Base {
}
};
/// Null class is the L2 norm and not robust; equivalent to a Gaussian noise model with no loss function.
/// Null class is not robust so is a Gaussian ?
class GTSAM_EXPORT Null : public Base {
public:
typedef boost::shared_ptr<Null> shared_ptr;
@@ -134,7 +135,7 @@ class GTSAM_EXPORT Null : public Base {
Null(const ReweightScheme reweight = Block) : Base(reweight) {}
~Null() {}
double weight(double /*error*/) const { return 1.0; }
double loss(double error) const { return 0.5 * error * error; }
double residual(double error) const { return error; }
void print(const std::string &s) const;
bool equals(const Base & /*expected*/, double /*tol*/) const { return true; }
static shared_ptr Create();


@@ -467,10 +467,10 @@ TEST(NoiseModel, robustFunctionFair)
DOUBLES_EQUAL(0.3333333333333333, fair->weight(error3), 1e-8);
DOUBLES_EQUAL(0.8333333333333333, fair->weight(error4), 1e-8);
DOUBLES_EQUAL(0.441961080151135, fair->loss(error1), 1e-8);
DOUBLES_EQUAL(22.534692783297260, fair->loss(error2), 1e-8);
DOUBLES_EQUAL(22.534692783297260, fair->loss(error3), 1e-8);
DOUBLES_EQUAL(0.441961080151135, fair->loss(error4), 1e-8);
DOUBLES_EQUAL(0.441961080151135, fair->residual(error1), 1e-8);
DOUBLES_EQUAL(22.534692783297260, fair->residual(error2), 1e-8);
DOUBLES_EQUAL(22.534692783297260, fair->residual(error3), 1e-8);
DOUBLES_EQUAL(0.441961080151135, fair->residual(error4), 1e-8);
}
TEST(NoiseModel, robustFunctionHuber)
@@ -483,10 +483,10 @@ TEST(NoiseModel, robustFunctionHuber)
DOUBLES_EQUAL(0.5, huber->weight(error3), 1e-8);
DOUBLES_EQUAL(1.0, huber->weight(error4), 1e-8);
DOUBLES_EQUAL(0.5000, huber->loss(error1), 1e-8);
DOUBLES_EQUAL(37.5000, huber->loss(error2), 1e-8);
DOUBLES_EQUAL(37.5000, huber->loss(error3), 1e-8);
DOUBLES_EQUAL(0.5000, huber->loss(error4), 1e-8);
DOUBLES_EQUAL(0.5000, huber->residual(error1), 1e-8);
DOUBLES_EQUAL(37.5000, huber->residual(error2), 1e-8);
DOUBLES_EQUAL(37.5000, huber->residual(error3), 1e-8);
DOUBLES_EQUAL(0.5000, huber->residual(error4), 1e-8);
}
TEST(NoiseModel, robustFunctionCauchy)
@@ -499,10 +499,10 @@ TEST(NoiseModel, robustFunctionCauchy)
DOUBLES_EQUAL(0.2000, cauchy->weight(error3), 1e-8);
DOUBLES_EQUAL(0.961538461538461, cauchy->weight(error4), 1e-8);
DOUBLES_EQUAL(0.490258914416017, cauchy->loss(error1), 1e-8);
DOUBLES_EQUAL(20.117973905426254, cauchy->loss(error2), 1e-8);
DOUBLES_EQUAL(20.117973905426254, cauchy->loss(error3), 1e-8);
DOUBLES_EQUAL(0.490258914416017, cauchy->loss(error4), 1e-8);
DOUBLES_EQUAL(0.490258914416017, cauchy->residual(error1), 1e-8);
DOUBLES_EQUAL(20.117973905426254, cauchy->residual(error2), 1e-8);
DOUBLES_EQUAL(20.117973905426254, cauchy->residual(error3), 1e-8);
DOUBLES_EQUAL(0.490258914416017, cauchy->residual(error4), 1e-8);
}
TEST(NoiseModel, robustFunctionGemanMcClure)
@@ -514,10 +514,10 @@ TEST(NoiseModel, robustFunctionGemanMcClure)
DOUBLES_EQUAL(9.80296e-5, gmc->weight(error3), 1e-8);
DOUBLES_EQUAL(0.25 , gmc->weight(error4), 1e-8);
DOUBLES_EQUAL(0.2500, gmc->loss(error1), 1e-8);
DOUBLES_EQUAL(0.495049504950495, gmc->loss(error2), 1e-8);
DOUBLES_EQUAL(0.495049504950495, gmc->loss(error3), 1e-8);
DOUBLES_EQUAL(0.2500, gmc->loss(error4), 1e-8);
DOUBLES_EQUAL(0.2500, gmc->residual(error1), 1e-8);
DOUBLES_EQUAL(0.495049504950495, gmc->residual(error2), 1e-8);
DOUBLES_EQUAL(0.495049504950495, gmc->residual(error3), 1e-8);
DOUBLES_EQUAL(0.2500, gmc->residual(error4), 1e-8);
}
TEST(NoiseModel, robustFunctionWelsch)
@@ -530,10 +530,10 @@ TEST(NoiseModel, robustFunctionWelsch)
DOUBLES_EQUAL(0.018315638888734, welsch->weight(error3), 1e-8);
DOUBLES_EQUAL(0.960789439152323, welsch->weight(error4), 1e-8);
DOUBLES_EQUAL(0.490132010595960, welsch->loss(error1), 1e-8);
DOUBLES_EQUAL(12.271054513890823, welsch->loss(error2), 1e-8);
DOUBLES_EQUAL(12.271054513890823, welsch->loss(error3), 1e-8);
DOUBLES_EQUAL(0.490132010595960, welsch->loss(error4), 1e-8);
DOUBLES_EQUAL(0.490132010595960, welsch->residual(error1), 1e-8);
DOUBLES_EQUAL(12.271054513890823, welsch->residual(error2), 1e-8);
DOUBLES_EQUAL(12.271054513890823, welsch->residual(error3), 1e-8);
DOUBLES_EQUAL(0.490132010595960, welsch->residual(error4), 1e-8);
}
TEST(NoiseModel, robustFunctionTukey)
@@ -546,10 +546,10 @@ TEST(NoiseModel, robustFunctionTukey)
DOUBLES_EQUAL(0.0, tukey->weight(error3), 1e-8);
DOUBLES_EQUAL(0.9216, tukey->weight(error4), 1e-8);
DOUBLES_EQUAL(0.480266666666667, tukey->loss(error1), 1e-8);
DOUBLES_EQUAL(4.166666666666667, tukey->loss(error2), 1e-8);
DOUBLES_EQUAL(4.166666666666667, tukey->loss(error3), 1e-8);
DOUBLES_EQUAL(0.480266666666667, tukey->loss(error4), 1e-8);
DOUBLES_EQUAL(0.480266666666667, tukey->residual(error1), 1e-8);
DOUBLES_EQUAL(4.166666666666667, tukey->residual(error2), 1e-8);
DOUBLES_EQUAL(4.166666666666667, tukey->residual(error3), 1e-8);
DOUBLES_EQUAL(0.480266666666667, tukey->residual(error4), 1e-8);
}
TEST(NoiseModel, robustFunctionDCS)
@@ -560,8 +560,8 @@ TEST(NoiseModel, robustFunctionDCS)
DOUBLES_EQUAL(1.0 , dcs->weight(error1), 1e-8);
DOUBLES_EQUAL(0.00039211, dcs->weight(error2), 1e-8);
DOUBLES_EQUAL(0.5 , dcs->loss(error1), 1e-8);
DOUBLES_EQUAL(0.9900990099, dcs->loss(error2), 1e-8);
DOUBLES_EQUAL(0.5 , dcs->residual(error1), 1e-8);
DOUBLES_EQUAL(0.9900990099, dcs->residual(error2), 1e-8);
}
TEST(NoiseModel, robustFunctionL2WithDeadZone)
@@ -576,12 +576,12 @@ TEST(NoiseModel, robustFunctionL2WithDeadZone)
DOUBLES_EQUAL(0.00990099009, lsdz->weight(e4), 1e-8);
DOUBLES_EQUAL(0.9, lsdz->weight(e5), 1e-8);
DOUBLES_EQUAL(40.5, lsdz->loss(e0), 1e-8);
DOUBLES_EQUAL(0.00005, lsdz->loss(e1), 1e-8);
DOUBLES_EQUAL(0.0, lsdz->loss(e2), 1e-8);
DOUBLES_EQUAL(0.0, lsdz->loss(e3), 1e-8);
DOUBLES_EQUAL(0.00005, lsdz->loss(e4), 1e-8);
DOUBLES_EQUAL(40.5, lsdz->loss(e5), 1e-8);
DOUBLES_EQUAL(40.5, lsdz->residual(e0), 1e-8);
DOUBLES_EQUAL(0.00005, lsdz->residual(e1), 1e-8);
DOUBLES_EQUAL(0.0, lsdz->residual(e2), 1e-8);
DOUBLES_EQUAL(0.0, lsdz->residual(e3), 1e-8);
DOUBLES_EQUAL(0.00005, lsdz->residual(e4), 1e-8);
DOUBLES_EQUAL(40.5, lsdz->residual(e5), 1e-8);
}
/* ************************************************************************* */


@@ -48,7 +48,7 @@ function plot_m_estimator(x, model, plot_title, fig_id, filename)
rho = zeros(size(x));
for i = 1:size(x, 2)
w(i) = model.weight(x(i));
rho(i) = model.loss(x(i));
rho(i) = model.residual(x(i));
end
psi = w .* x;