Made State a shared pointer, although I am really annoyed that it implies a double copy at every update/predict
parent 53e6c6047f
commit f260af51c3
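The hunks below touch the KalmanFilter implementation, its header (where the State typedef changes from a GaussianDensity value to a GaussianDensity::shared_ptr), and the unit tests (call sites change from p.mean() to p->mean()). The double-copy concern in the commit message can be illustrated with a minimal, self-contained sketch; Density, State and predict below are stand-in names, not GTSAM code, and the copy counting only assumes the density type is copy-constructible:

// Hypothetical stand-alone sketch, not GTSAM code: "Density" stands in for
// GaussianDensity and "State" becomes a shared pointer to it, mirroring the
// typedef change in the header hunk further down.
#include <boost/make_shared.hpp>
#include <boost/shared_ptr.hpp>
#include <iostream>
#include <vector>

struct Density {
  std::vector<double> mu;  // toy mean vector
  explicit Density(const std::vector<double>& m) : mu(m) { std::cout << "construct\n"; }
  Density(const Density& other) : mu(other.mu) { std::cout << "copy\n"; }
  const std::vector<double>& mean() const { return mu; }
};

typedef boost::shared_ptr<Density> State;  // was: typedef Density State;

// Toy "predict": build a new density from the old state, then wrap it.
State predict(const State& p) {
  Density posterior(*p);                          // first copy: new density built from the old state
  return boost::make_shared<Density>(posterior);  // second copy: copy-construct into the shared allocation
}

int main() {
  State p0 = boost::make_shared<Density>(std::vector<double>(2, 0.0));
  State p1 = predict(p0);                  // prints "copy" twice
  std::cout << p1->mean().size() << "\n";  // call sites now use -> instead of .
  return 0;
}

Constructing the result directly inside boost::make_shared, rather than copying a named temporary into it, would remove one of the two copies; whether that is possible here depends on how the eliminated conditional is produced, which is outside this sketch.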
@@ -27,6 +27,8 @@
 #include <gtsam/linear/HessianFactor.h>
+#include <gtsam/base/Testable.h>

+#include <boost/make_shared.hpp>

 namespace gtsam {

   using namespace std;
@@ -42,7 +44,8 @@ namespace gtsam {
   // As this is a filter, all we need is the posterior P(x_t),
   // so we just keep the root of the Bayes net
   GaussianConditional::shared_ptr conditional = bayesNet->back();
-  return *conditional;
+  // TODO: awful ! A copy constructor followed by ANOTHER copy constructor in make_shared?
+  return boost::make_shared<GaussianDensity>(*conditional);
 }

 /* ************************************************************************* */
@@ -53,7 +56,7 @@
   GaussianFactorGraph factorGraph;

   // push back previous solution and new factor
-  factorGraph.push_back(p.toFactor());
+  factorGraph.push_back(p->toFactor());
   factorGraph.push_back(GaussianFactor::shared_ptr(newFactor));

   // Eliminate graph in order x0, x1, to get Bayes net P(x0|x1)P(x1)
@@ -83,7 +86,7 @@

   /* ************************************************************************* */
   void KalmanFilter::print(const string& s) const {
-    cout << s << ", " << n_ << "-dimensional Kalman filter\n";
+    cout << "KalmanFilter " << s << ", dim = " << n_ << endl;
   }

   /* ************************************************************************* */
@@ -52,7 +52,10 @@
     QR, LDL
   };

-  typedef GaussianDensity State;
+  /**
+   * The Kalman filter state is simply a GaussianDensity
+   */
+  typedef GaussianDensity::shared_ptr State;

 private:
@@ -60,7 +63,9 @@
   const Matrix I_; /** identity matrix of size n*n */
   const Factorization method_; /** algorithm */

-  bool useQR() const { return method_==QR; }
+  bool useQR() const {
+    return method_ == QR;
+  }

 public:
@@ -86,7 +91,7 @@

   /** Return step index k, starts at 0, incremented at each predict. */
   static Index step(const State& p) {
-    return p.firstFrontalKey();
+    return p->firstFrontalKey();
   }

   /**
@@ -98,8 +103,8 @@
    * where F is the state transition model/matrix, B is the control input model,
    * and w is zero-mean, Gaussian white noise with covariance Q.
    */
-  KalmanFilter::State predict(const State& p, const Matrix& F,
-      const Matrix& B, const Vector& u, const SharedDiagonal& modelQ);
+  State predict(const State& p, const Matrix& F, const Matrix& B,
+      const Vector& u, const SharedDiagonal& modelQ);

   /*
    * Version of predict with full covariance
@@ -107,8 +112,8 @@
    * physical property, such as velocity or acceleration, and G is derived from physics.
    * This version allows more realistic models than a diagonal covariance matrix.
    */
-  KalmanFilter::State predictQ(const State& p, const Matrix& F,
-      const Matrix& B, const Vector& u, const Matrix& Q);
+  State predictQ(const State& p, const Matrix& F, const Matrix& B,
+      const Vector& u, const Matrix& Q);

   /**
    * Predict the state P(x_{t+1}|Z^t)
@@ -118,8 +123,8 @@
    * This version of predict takes GaussianFactor motion model [A0 A1 b]
    * with an optional noise model.
    */
-  KalmanFilter::State predict2(const State& p, const Matrix& A0,
-      const Matrix& A1, const Vector& b, const SharedDiagonal& model);
+  State predict2(const State& p, const Matrix& A0, const Matrix& A1,
+      const Vector& b, const SharedDiagonal& model);

   /**
    * Update Kalman filter with a measurement
@@ -129,9 +134,8 @@
    * Gaussian white noise with covariance R.
    * Currently, R is restricted to diagonal Gaussians (model parameter)
    */
-  KalmanFilter::State update(const State& p, const Matrix& H, const Vector& z,
+  State update(const State& p, const Matrix& H, const Vector& z,
       const SharedDiagonal& model);

 };

 } // \namespace gtsam
@@ -49,17 +49,17 @@ TEST( KalmanFilter, constructor ) {
   KalmanFilter::State p1 = kf1.init(x_initial, P1);

   // Assert it has the correct mean, covariance and information
-  EXPECT(assert_equal(x_initial, p1.mean()));
+  EXPECT(assert_equal(x_initial, p1->mean()));
   Matrix Sigma = Matrix_(2, 2, 0.01, 0.0, 0.0, 0.01);
-  EXPECT(assert_equal(Sigma, p1.covariance()));
-  EXPECT(assert_equal(inverse(Sigma), p1.information()));
+  EXPECT(assert_equal(Sigma, p1->covariance()));
+  EXPECT(assert_equal(inverse(Sigma), p1->information()));

   // Create one with a sharedGaussian
   KalmanFilter::State p2 = kf1.init(x_initial, Sigma);
-  EXPECT(assert_equal(Sigma, p2.covariance()));
+  EXPECT(assert_equal(Sigma, p2->covariance()));

   // Now make sure both agree
-  EXPECT(assert_equal(p1.covariance(), p2.covariance()));
+  EXPECT(assert_equal(p1->covariance(), p2->covariance()));
 }

 /* ************************************************************************* */
@@ -103,32 +103,32 @@ TEST( KalmanFilter, linear1 ) {

   // Create initial KalmanFilter object
   KalmanFilter::State p0 = kf.init(x_initial, P_initial);
-  EXPECT(assert_equal(expected0, p0.mean()));
-  EXPECT(assert_equal(P00, p0.covariance()));
+  EXPECT(assert_equal(expected0, p0->mean()));
+  EXPECT(assert_equal(P00, p0->covariance()));

   // Run iteration 1
   KalmanFilter::State p1p = kf.predict(p0, F, B, u, modelQ);
-  EXPECT(assert_equal(expected1, p1p.mean()));
-  EXPECT(assert_equal(P01, p1p.covariance()));
+  EXPECT(assert_equal(expected1, p1p->mean()));
+  EXPECT(assert_equal(P01, p1p->covariance()));

   KalmanFilter::State p1 = kf.update(p1p, H, z1, modelR);
-  EXPECT(assert_equal(expected1, p1.mean()));
-  EXPECT(assert_equal(I11, p1.information()));
+  EXPECT(assert_equal(expected1, p1->mean()));
+  EXPECT(assert_equal(I11, p1->information()));

   // Run iteration 2 (with full covariance)
   KalmanFilter::State p2p = kf.predictQ(p1, F, B, u, Q);
-  EXPECT(assert_equal(expected2, p2p.mean()));
+  EXPECT(assert_equal(expected2, p2p->mean()));

   KalmanFilter::State p2 = kf.update(p2p, H, z2, modelR);
-  EXPECT(assert_equal(expected2, p2.mean()));
+  EXPECT(assert_equal(expected2, p2->mean()));

   // Run iteration 3
   KalmanFilter::State p3p = kf.predict(p2, F, B, u, modelQ);
-  EXPECT(assert_equal(expected3, p3p.mean()));
+  EXPECT(assert_equal(expected3, p3p->mean()));
   LONGS_EQUAL(3, KalmanFilter::step(p3p));

   KalmanFilter::State p3 = kf.update(p3p, H, z3, modelR);
-  EXPECT(assert_equal(expected3, p3.mean()));
+  EXPECT(assert_equal(expected3, p3->mean()));
   LONGS_EQUAL(3, KalmanFilter::step(p3));
 }
@@ -160,8 +160,8 @@ TEST( KalmanFilter, predict ) {
   Vector b = R*B*u;
   SharedDiagonal nop = noiseModel::Isotropic::Sigma(2, 1.0);
   KalmanFilter::State pb = kf.predict2(p0, A1, A2, b, nop);
-  EXPECT(assert_equal(pa.mean(), pb.mean()));
-  EXPECT(assert_equal(pa.covariance(), pb.covariance()));
+  EXPECT(assert_equal(pa->mean(), pb->mean()));
+  EXPECT(assert_equal(pa->covariance(), pb->covariance()));
 }

 /* ************************************************************************* */
@@ -216,13 +216,13 @@ TEST( KalmanFilter, QRvsCholesky ) {
   KalmanFilter::State pb = kfb.predictQ(p0b, Psi_k, B, u, dt_Q_k);

   // Check that they yield the same mean and information matrix
-  EXPECT(assert_equal(pa.mean(), pb.mean()));
-  EXPECT(assert_equal(pa.information(), pb.information(), 1e-7));
+  EXPECT(assert_equal(pa->mean(), pb->mean()));
+  EXPECT(assert_equal(pa->information(), pb->information(), 1e-7));

   // and in addition attain the correct covariance
   Vector expectedMean = Vector_(9, 0.9814, 1.0200, 1.0190, 1., 1., 1., 1., 1., 1.);
-  EXPECT(assert_equal(expectedMean, pa.mean(), 1e-7));
-  EXPECT(assert_equal(expectedMean, pb.mean(), 1e-7));
+  EXPECT(assert_equal(expectedMean, pa->mean(), 1e-7));
+  EXPECT(assert_equal(expectedMean, pb->mean(), 1e-7));
   Matrix expected = 1e-6*Matrix_(9, 9,
       48.8, -3.1, -0.0, -0.4, -0.4, 0.0, 0.0, 63.8, -0.6,
       -3.1, 148.4, -0.3, 0.5, 1.7, 0.2, -63.8, 0.0, -0.1,
@@ -233,8 +233,8 @@ TEST( KalmanFilter, QRvsCholesky ) {
       0.0, -63.8, 0.0, 0.0, 0.0, 0.0, 647.2, 0.0, 0.0,
       63.8, 0.0, 0.1, 0.0, 0.0, 0.0, 0.0, 647.2, 0.0,
       -0.6, -0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 647.2);
-  EXPECT(assert_equal(expected, pa.covariance(), 1e-7));
-  EXPECT(assert_equal(expected, pb.covariance(), 1e-7));
+  EXPECT(assert_equal(expected, pa->covariance(), 1e-7));
+  EXPECT(assert_equal(expected, pb->covariance(), 1e-7));

   // prepare update
   Matrix H = 1e-3*Matrix_(3, 9,
@@ -250,13 +250,13 @@ TEST( KalmanFilter, QRvsCholesky ) {
   KalmanFilter::State pb2 = kfb.update(pb, H, z, modelR);

   // Check that they yield the same mean and information matrix
-  EXPECT(assert_equal(pa2.mean(), pb2.mean()));
-  EXPECT(assert_equal(pa2.information(), pb2.information(), 1e-7));
+  EXPECT(assert_equal(pa2->mean(), pb2->mean()));
+  EXPECT(assert_equal(pa2->information(), pb2->information(), 1e-7));

   // and in addition attain the correct mean and covariance
   Vector expectedMean2 = Vector_(9, 0.9207, 0.9030, 1.0178, 1.0002, 0.9992, 0.9998, 0.9981, 1.0035, 0.9882);
-  EXPECT(assert_equal(expectedMean2, pa2.mean(), 1e-4)); // not happy with tolerance here !
-  EXPECT(assert_equal(expectedMean2, pb2.mean(), 1e-4)); // is something still amiss?
+  EXPECT(assert_equal(expectedMean2, pa2->mean(), 1e-4)); // not happy with tolerance here !
+  EXPECT(assert_equal(expectedMean2, pb2->mean(), 1e-4)); // is something still amiss?
   Matrix expected2 = 1e-6*Matrix_(9, 9,
       46.1, -2.6, -0.0, -0.4, -0.4, 0.0, 0.0, 63.9, -0.5,
       -2.6, 132.8, -0.5, 0.4, 1.5, 0.2, -64.0, -0.0, -0.1,
@@ -267,8 +267,8 @@ TEST( KalmanFilter, QRvsCholesky ) {
       0.0, -64.0, -0.0, -0.0, -0.0, -0.0, 647.2, -0.0, 0.0,
       63.9, -0.0, 0.1, -0.0, -0.0, 0.0, -0.0, 647.2, 0.1,
       -0.5, -0.1, 0.0, -0.0, -0.0, 0.0, 0.0, 0.1, 635.8);
-  EXPECT(assert_equal(expected2, pa2.covariance(), 1e-7));
-  EXPECT(assert_equal(expected2, pb2.covariance(), 1e-7));
+  EXPECT(assert_equal(expected2, pa2->covariance(), 1e-7));
+  EXPECT(assert_equal(expected2, pb2->covariance(), 1e-7));
 }

 /* ************************************************************************* */