diff --git a/.cproject b/.cproject
index ff704cea8..ce0ab3c4c 100644
--- a/.cproject
+++ b/.cproject
@@ -584,6 +584,7 @@
make
+
tests/testBayesTree.run
true
false
@@ -591,6 +592,7 @@
make
+
testBinaryBayesNet.run
true
false
@@ -638,6 +640,7 @@
make
+
testSymbolicBayesNet.run
true
false
@@ -645,6 +648,7 @@
make
+
tests/testSymbolicFactor.run
true
false
@@ -652,6 +656,7 @@
make
+
testSymbolicFactorGraph.run
true
false
@@ -667,6 +672,7 @@
make
+
tests/testBayesTree
true
false
@@ -1098,6 +1104,7 @@
make
+
testErrors.run
true
false
@@ -1327,6 +1334,46 @@
true
true
+
+ make
+ -j5
+ testBTree.run
+ true
+ true
+ true
+
+
+ make
+ -j5
+ testDSF.run
+ true
+ true
+ true
+
+
+ make
+ -j5
+ testDSFMap.run
+ true
+ true
+ true
+
+
+ make
+ -j5
+ testDSFVector.run
+ true
+ true
+ true
+
+
+ make
+ -j5
+ testFixedVector.run
+ true
+ true
+ true
+
make
-j2
@@ -1409,7 +1456,6 @@
make
-
testSimulated2DOriented.run
true
false
@@ -1449,7 +1495,6 @@
make
-
testSimulated2D.run
true
false
@@ -1457,7 +1502,6 @@
make
-
testSimulated3D.run
true
false
@@ -1471,46 +1515,6 @@
true
true
-
- make
- -j5
- testBTree.run
- true
- true
- true
-
-
- make
- -j5
- testDSF.run
- true
- true
- true
-
-
- make
- -j5
- testDSFMap.run
- true
- true
- true
-
-
- make
- -j5
- testDSFVector.run
- true
- true
- true
-
-
- make
- -j5
- testFixedVector.run
- true
- true
- true
-
make
-j5
@@ -1551,6 +1555,14 @@
false
true
+
+ make
+ -j4
+ testLabeledSymbol.run
+ true
+ true
+ true
+
make
-j2
@@ -1768,6 +1780,7 @@
cpack
+
-G DEB
true
false
@@ -1775,6 +1788,7 @@
cpack
+
-G RPM
true
false
@@ -1782,6 +1796,7 @@
cpack
+
-G TGZ
true
false
@@ -1789,6 +1804,7 @@
cpack
+
--config CPackSourceConfig.cmake
true
false
@@ -2659,6 +2675,7 @@
make
+
testGraph.run
true
false
@@ -2666,6 +2683,7 @@
make
+
testJunctionTree.run
true
false
@@ -2673,6 +2691,7 @@
make
+
testSymbolicBayesNetB.run
true
false
@@ -3272,7 +3291,6 @@
make
-
tests/testGaussianISAM2
true
false
diff --git a/gtsam/base/Matrix.h b/gtsam/base/Matrix.h
index 20a4a6bc4..c3cbfa341 100644
--- a/gtsam/base/Matrix.h
+++ b/gtsam/base/Matrix.h
@@ -238,7 +238,7 @@ Eigen::Block<const MATRIX> sub(const MATRIX& A, size_t i1, size_t i2, size_t j1,
* @param j is the column of the upper left corner insert location
*/
template <typename Derived1, typename Derived2>
-GTSAM_EXPORT void insertSub(Eigen::MatrixBase<Derived1>& fullMatrix, const Eigen::MatrixBase<Derived2>& subMatrix, size_t i, size_t j) {
+void insertSub(Eigen::MatrixBase<Derived1>& fullMatrix, const Eigen::MatrixBase<Derived2>& subMatrix, size_t i, size_t j) {
fullMatrix.block(i, j, subMatrix.rows(), subMatrix.cols()) = subMatrix;
}
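Note on the Matrix.h hunk: `GTSAM_EXPORT` on a header-defined function template is at best a no-op and can break MSVC shared builds, so dropping it is the right fix. A minimal usage sketch of `insertSub` (standalone; nothing here beyond the header above):

```cpp
#include <gtsam/base/Matrix.h>
#include <iostream>

int main() {
  gtsam::Matrix full = gtsam::Matrix::Zero(4, 4);
  gtsam::Matrix sub = (gtsam::Matrix(2, 2) << 1, 2, 3, 4).finished();
  gtsam::insertSub(full, sub, 1, 2);  // full.block(1, 2, 2, 2) = sub
  std::cout << full << std::endl;     // rows 1-2, cols 2-3 now hold sub
  return 0;
}
```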
diff --git a/gtsam/geometry/Pose3.h b/gtsam/geometry/Pose3.h
index d930c815e..d30bd4167 100644
--- a/gtsam/geometry/Pose3.h
+++ b/gtsam/geometry/Pose3.h
@@ -323,7 +323,6 @@ typedef std::pair<Point3, Point3> Point3Pair;
GTSAM_EXPORT boost::optional<Pose3> align(const std::vector<Point3Pair>& pairs);
template<>
-struct traits<Pose3> : public internal::LieGroupTraits<Pose3> {
-};
+struct traits<Pose3> : public internal::LieGroupTraits<Pose3> {};
} // namespace gtsam
diff --git a/gtsam/inference/LabeledSymbol.cpp b/gtsam/inference/LabeledSymbol.cpp
index 7e573c13f..b9e93ceb1 100644
--- a/gtsam/inference/LabeledSymbol.cpp
+++ b/gtsam/inference/LabeledSymbol.cpp
@@ -17,12 +17,8 @@
#include <iostream>
-#include <boost/mpl/char.hpp>
#include <boost/format.hpp>
-#include <boost/lambda/lambda.hpp>
-#include <boost/lambda/bind.hpp>
-#include <boost/lambda/construct.hpp>
-#include <boost/lexical_cast.hpp>
+#include <boost/bind.hpp>
#include <gtsam/inference/LabeledSymbol.h>
@@ -111,23 +107,21 @@ bool LabeledSymbol::operator!=(gtsam::Key comp) const {
}
/* ************************************************************************* */
+static LabeledSymbol make(gtsam::Key key) { return LabeledSymbol(key); }
+
boost::function<bool(Key)> LabeledSymbol::TypeTest(unsigned char c) {
- namespace bl = boost::lambda;
- return bl::bind(&LabeledSymbol::chr, bl::bind(bl::constructor<LabeledSymbol>(), bl::_1)) == c;
+ return boost::bind(&LabeledSymbol::chr, boost::bind(make, _1)) == c;
}
-/* ************************************************************************* */
boost::function<bool(Key)> LabeledSymbol::LabelTest(unsigned char label) {
- namespace bl = boost::lambda;
- return bl::bind(&LabeledSymbol::label, bl::bind(bl::constructor<LabeledSymbol>(), bl::_1)) == label;
+ return boost::bind(&LabeledSymbol::label, boost::bind(make, _1)) == label;
}
-/* ************************************************************************* */
boost::function<bool(Key)> LabeledSymbol::TypeLabelTest(unsigned char c, unsigned char label) {
- namespace bl = boost::lambda;
- return bl::bind(&LabeledSymbol::chr, bl::bind(bl::constructor<LabeledSymbol>(), bl::_1)) == c &&
- bl::bind(&LabeledSymbol::label, bl::bind(bl::constructor<LabeledSymbol>(), bl::_1)) == label;
+ return boost::bind(&LabeledSymbol::chr, boost::bind(make, _1)) == c &&
+ boost::bind(&LabeledSymbol::label, boost::bind(make, _1)) == label;
}
+/* ************************************************************************* */
} // \namespace gtsam
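The rewritten predicates are drop-in replacements for the boost::lambda versions they retire. A small usage sketch (the key list is invented for illustration; only the three factory functions come from the code above):

```cpp
#include <gtsam/inference/LabeledSymbol.h>
#include <boost/function.hpp>
#include <iostream>
#include <vector>

int main() {
  using namespace gtsam;
  std::vector<Key> keys;
  keys.push_back(LabeledSymbol('x', 'A', 0));  // pose 0 of robot A
  keys.push_back(LabeledSymbol('x', 'B', 1));  // pose 1 of robot B
  keys.push_back(LabeledSymbol('l', 'A', 2));  // landmark 2 of robot A

  // Predicate selecting poses of robot A only
  boost::function<bool(Key)> isPoseA = LabeledSymbol::TypeLabelTest('x', 'A');
  for (size_t i = 0; i < keys.size(); ++i)
    std::cout << "keys[" << i << "] "
              << (isPoseA(keys[i]) ? "matches" : "does not match") << std::endl;
  return 0;
}
```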
diff --git a/gtsam/inference/Symbol.cpp b/gtsam/inference/Symbol.cpp
index 37a6d0897..f8b37d429 100644
--- a/gtsam/inference/Symbol.cpp
+++ b/gtsam/inference/Symbol.cpp
@@ -18,19 +18,8 @@
#include <iostream>
-#include <boost/mpl/char.hpp>
#include <boost/format.hpp>
-#include <boost/lambda/lambda.hpp>
-#include <boost/lambda/bind.hpp>
-#ifdef __GNUC__
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wunused-variable"
-#endif
-#include <boost/lambda/construct.hpp>
-#include <boost/lexical_cast.hpp>
-#ifdef __GNUC__
-#pragma GCC diagnostic pop
-#endif
+#include <boost/bind.hpp>
#include <gtsam/inference/Symbol.h>
#include <gtsam/inference/Key.h>
@@ -71,10 +60,10 @@ Symbol::operator std::string() const {
return str(boost::format("%c%d") % c_ % j_);
}
+static Symbol make(gtsam::Key key) { return Symbol(key); }
+
boost::function<bool(Key)> Symbol::ChrTest(unsigned char c) {
- namespace bl = boost::lambda;
- return bl::bind(&Symbol::chr, bl::bind(bl::constructor<Symbol>(), bl::_1))
- == c;
+ return bind(&Symbol::chr, bind(make, _1)) == c;
}
} // namespace gtsam
diff --git a/gtsam/inference/tests/testKey.cpp b/gtsam/inference/tests/testKey.cpp
index 5b57096cb..1033c0cc9 100644
--- a/gtsam/inference/tests/testKey.cpp
+++ b/gtsam/inference/tests/testKey.cpp
@@ -14,14 +14,14 @@
* @author Alex Cunningham
*/
-#include <boost/assign/std/list.hpp> // for operator +=
-using namespace boost::assign;
-
-#include <CppUnitLite/TestHarness.h>
+#include <gtsam/inference/Symbol.h>
#include <gtsam/inference/Key.h>
#include <gtsam/base/Testable.h>
-#include <gtsam/inference/Symbol.h>
+#include <CppUnitLite/TestHarness.h>
+
+#include <boost/assign/std/list.hpp> // for operator +=
+using namespace boost::assign;
using namespace std;
using namespace gtsam;
@@ -65,6 +65,13 @@ TEST(Key, KeySymbolEncoding) {
EXPECT(assert_equal(symbol, Symbol(key)));
}
+/* ************************************************************************* */
+TEST(Key, ChrTest) {
+ Key key = Symbol('c',3);
+ EXPECT(Symbol::ChrTest('c')(key));
+ EXPECT(!Symbol::ChrTest('d')(key));
+}
+
/* ************************************************************************* */
int main() { TestResult tr; return TestRegistry::runAllTests(tr); }
/* ************************************************************************* */
diff --git a/gtsam/inference/tests/testLabeledSymbol.cpp b/gtsam/inference/tests/testLabeledSymbol.cpp
index 07727c8dc..18216453d 100644
--- a/gtsam/inference/tests/testLabeledSymbol.cpp
+++ b/gtsam/inference/tests/testLabeledSymbol.cpp
@@ -14,20 +14,19 @@
* @author Alex Cunningham
*/
-#include <boost/assign/std/list.hpp> // for operator +=
-using namespace boost::assign;
-
-#include <CppUnitLite/TestHarness.h>
+#include <gtsam/inference/LabeledSymbol.h>
#include <gtsam/inference/Symbol.h>
#include <gtsam/inference/Key.h>
-#include <gtsam/inference/LabeledSymbol.h>
-
+#include <CppUnitLite/TestHarness.h>
+#include <boost/assign/std/list.hpp> // for operator +=
+
+using namespace boost::assign;
using namespace std;
using namespace gtsam;
/* ************************************************************************* */
-TEST( testLabeledSymbol, KeyLabeledSymbolConversion ) {
+TEST(LabeledSymbol, KeyLabeledSymbolConversion ) {
LabeledSymbol expected('x', 'A', 4);
Key key(expected);
LabeledSymbol actual(key);
@@ -36,7 +35,7 @@ TEST( testLabeledSymbol, KeyLabeledSymbolConversion ) {
}
/* ************************************************************************* */
-TEST( testLabeledSymbol, KeyLabeledSymbolEncoding ) {
+TEST(LabeledSymbol, KeyLabeledSymbolEncoding ) {
// Test encoding of LabeledSymbol <-> size_t <-> string
// Encoding scheme:
@@ -69,6 +68,17 @@ TEST( testLabeledSymbol, KeyLabeledSymbolEncoding ) {
}
}
+/* ************************************************************************* */
+TEST(LabeledSymbol, ChrTest) {
+ Key key = LabeledSymbol('c','A',3);
+ EXPECT(LabeledSymbol::TypeTest('c')(key));
+ EXPECT(!LabeledSymbol::TypeTest('d')(key));
+ EXPECT(LabeledSymbol::LabelTest('A')(key));
+ EXPECT(!LabeledSymbol::LabelTest('D')(key));
+ EXPECT(LabeledSymbol::TypeLabelTest('c','A')(key));
+ EXPECT(!LabeledSymbol::TypeLabelTest('c','D')(key));
+}
+
/* ************************************************************************* */
int main() { TestResult tr; return TestRegistry::runAllTests(tr); }
/* ************************************************************************* */
diff --git a/gtsam/linear/GaussianFactor.h b/gtsam/linear/GaussianFactor.h
index 16b1f759a..c67636341 100644
--- a/gtsam/linear/GaussianFactor.h
+++ b/gtsam/linear/GaussianFactor.h
@@ -100,7 +100,7 @@ namespace gtsam {
/// Return the diagonal of the Hessian for this factor
virtual VectorValues hessianDiagonal() const = 0;
- /// Return the diagonal of the Hessian for this factor (raw memory version)
+ /// Raw memory access version of hessianDiagonal
virtual void hessianDiagonal(double* d) const = 0;
/// Return the block diagonal of the Hessian for this factor
@@ -122,16 +122,10 @@ namespace gtsam {
/// y += alpha * A'*A*x
virtual void multiplyHessianAdd(double alpha, const VectorValues& x, VectorValues& y) const = 0;
- /// y += alpha * A'*A*x
- virtual void multiplyHessianAdd(double alpha, const double* x, double* y, std::vector<size_t> keys) const = 0;
-
- /// y += alpha * A'*A*x
- virtual void multiplyHessianAdd(double alpha, const double* x, double* y) const = 0;
-
/// A'*b for Jacobian, eta for Hessian
virtual VectorValues gradientAtZero() const = 0;
- /// A'*b for Jacobian, eta for Hessian (raw memory version)
+ /// Raw memory access version of gradientAtZero
virtual void gradientAtZero(double* d) const = 0;
/// Gradient wrt a key at any values
diff --git a/gtsam/linear/GaussianFactorGraph.cpp b/gtsam/linear/GaussianFactorGraph.cpp
index 2879e0a4c..6953d2969 100644
--- a/gtsam/linear/GaussianFactorGraph.cpp
+++ b/gtsam/linear/GaussianFactorGraph.cpp
@@ -357,15 +357,6 @@ namespace gtsam {
f->multiplyHessianAdd(alpha, x, y);
}
- /* ************************************************************************* */
- void GaussianFactorGraph::multiplyHessianAdd(double alpha,
- const double* x, double* y) const {
- vector<size_t> FactorKeys = getkeydim();
- BOOST_FOREACH(const GaussianFactor::shared_ptr& f, *this)
- f->multiplyHessianAdd(alpha, x, y, FactorKeys);
-
- }
-
/* ************************************************************************* */
void GaussianFactorGraph::multiplyInPlace(const VectorValues& x, Errors& e) const {
multiplyInPlace(x, e.begin());
diff --git a/gtsam/linear/GaussianFactorGraph.h b/gtsam/linear/GaussianFactorGraph.h
index 8653e3629..811c0f8b0 100644
--- a/gtsam/linear/GaussianFactorGraph.h
+++ b/gtsam/linear/GaussianFactorGraph.h
@@ -310,10 +310,6 @@ namespace gtsam {
void multiplyHessianAdd(double alpha, const VectorValues& x,
VectorValues& y) const;
- ///** y += alpha*A'A*x */
- void multiplyHessianAdd(double alpha, const double* x,
- double* y) const;
-
///** In-place version e <- A*x that overwrites e. */
void multiplyInPlace(const VectorValues& x, Errors& e) const;
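With the raw `double*` overload removed from the graph API, callers go through the `VectorValues` path. A minimal sketch of a Hessian-vector product with the surviving overload (the helper name is ours, not GTSAM's):

```cpp
#include <gtsam/linear/GaussianFactorGraph.h>
#include <gtsam/linear/VectorValues.h>

// Hypothetical helper: returns alpha * A'A * x for the given graph.
gtsam::VectorValues hessianProduct(const gtsam::GaussianFactorGraph& gfg,
                                   const gtsam::VectorValues& x, double alpha) {
  // Zero vector with the same structure (keys and dimensions) as x
  gtsam::VectorValues y = gtsam::VectorValues::Zero(x);
  gfg.multiplyHessianAdd(alpha, x, y);  // y += alpha * A'A * x
  return y;
}
```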
diff --git a/gtsam/linear/HessianFactor.cpp b/gtsam/linear/HessianFactor.cpp
index 6d7f3c2e1..453e59892 100644
--- a/gtsam/linear/HessianFactor.cpp
+++ b/gtsam/linear/HessianFactor.cpp
@@ -359,20 +359,10 @@ VectorValues HessianFactor::hessianDiagonal() const {
}
/* ************************************************************************* */
-// TODO: currently assumes all variables of the same size 9 and keys arranged from 0 to n
+// Raw memory access version, currently supported only by Regular (fixed-size) factors
void HessianFactor::hessianDiagonal(double* d) const {
-
- // Use eigen magic to access raw memory
- typedef Eigen::Matrix<double, 9, 1> DVector;
- typedef Eigen::Map<DVector> DMap;
-
- // Loop over all variables in the factor
- for (DenseIndex pos = 0; pos < (DenseIndex)size(); ++pos) {
- Key j = keys_[pos];
- // Get the diagonal block, and insert its diagonal
- const Matrix& B = info_(pos, pos).selfadjointView();
- DMap(d + 9 * j) += B.diagonal();
- }
+ throw std::runtime_error(
+ "HessianFactor::hessianDiagonal raw memory access is allowed for Regular Factors only");
}
/* ************************************************************************* */
@@ -548,48 +538,6 @@ void HessianFactor::multiplyHessianAdd(double alpha, const VectorValues& x,
}
}
-/* ************************************************************************* */
-void HessianFactor::multiplyHessianAdd(double alpha, const double* x,
- double* yvalues, vector<size_t> offsets) const {
-
- // Use eigen magic to access raw memory
- typedef Eigen::Matrix<double, Eigen::Dynamic, 1> DVector;
- typedef Eigen::Map<DVector> DMap;
- typedef Eigen::Map<const DVector> ConstDMap;
-
- // Create a vector of temporary y values, corresponding to rows i
- vector<Vector> y;
- y.reserve(size());
- for (const_iterator it = begin(); it != end(); it++)
- y.push_back(zero(getDim(it)));
-
- // Accessing the VectorValues one by one is expensive
- // So we will loop over columns to access x only once per column
- // And fill the above temporary y values, to be added into yvalues after
- for (DenseIndex j = 0; j < (DenseIndex) size(); ++j) {
- DenseIndex i = 0;
- for (; i < j; ++i)
- y[i] += info_(i, j).knownOffDiagonal()
- * ConstDMap(x + offsets[keys_[j]],
- offsets[keys_[j] + 1] - offsets[keys_[j]]);
- // blocks on the diagonal are only half
- y[i] += info_(j, j).selfadjointView()
- * ConstDMap(x + offsets[keys_[j]],
- offsets[keys_[j] + 1] - offsets[keys_[j]]);
- // for below diagonal, we take transpose block from upper triangular part
- for (i = j + 1; i < (DenseIndex) size(); ++i)
- y[i] += info_(i, j).knownOffDiagonal()
- * ConstDMap(x + offsets[keys_[j]],
- offsets[keys_[j] + 1] - offsets[keys_[j]]);
- }
-
- // copy to yvalues
- for (DenseIndex i = 0; i < (DenseIndex) size(); ++i)
- DMap(yvalues + offsets[keys_[i]], offsets[keys_[i] + 1] - offsets[keys_[i]]) +=
- alpha * y[i];
-}
-
-
/* ************************************************************************* */
VectorValues HessianFactor::gradientAtZero() const {
VectorValues g;
@@ -600,20 +548,10 @@ VectorValues HessianFactor::gradientAtZero() const {
}
/* ************************************************************************* */
-// TODO: currently assumes all variables of the same size 9 and keys arranged from 0 to n
+// Raw memory access version, currently supported only by Regular (fixed-size) factors
void HessianFactor::gradientAtZero(double* d) const {
-
- // Use eigen magic to access raw memory
- typedef Eigen::Matrix<double, 9, 1> DVector;
- typedef Eigen::Map<DVector> DMap;
-
- // Loop over all variables in the factor
- for (DenseIndex pos = 0; pos < (DenseIndex)size(); ++pos) {
- Key j = keys_[pos];
- // Get the diagonal block, and insert its diagonal
- DVector dj = -info_(pos,size()).knownOffDiagonal();
- DMap(d + 9 * j) += dj;
- }
+ throw std::runtime_error(
+ "HessianFactor::gradientAtZero raw memory access is allowed for Regular Factors only");
}
/* ************************************************************************* */
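Net effect of the two HessianFactor.cpp hunks: the base class no longer ships dim-9, keys-0..n raw implementations; it throws, and only the Regular subclasses (below) implement raw access. A sketch of the resulting caller contract, assuming this diff:

```cpp
#include <gtsam/linear/GaussianFactor.h>
#include <iostream>
#include <stdexcept>

// Hypothetical caller: the raw-memory diagonal works only when the dynamic
// type overrides hessianDiagonal(double*), i.e. a Regular factor.
void addHessianDiagonal(const gtsam::GaussianFactor& factor, double* d) {
  try {
    factor.hessianDiagonal(d);  // virtual dispatch to a Regular override
  } catch (const std::runtime_error& e) {
    std::cerr << "raw access unsupported: " << e.what() << std::endl;
  }
}
```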
diff --git a/gtsam/linear/HessianFactor.h b/gtsam/linear/HessianFactor.h
index ec4710dd7..553978e37 100644
--- a/gtsam/linear/HessianFactor.h
+++ b/gtsam/linear/HessianFactor.h
@@ -340,7 +340,7 @@ namespace gtsam {
/// Return the diagonal of the Hessian for this factor
virtual VectorValues hessianDiagonal() const;
- /* ************************************************************************* */
+ /// Raw memory access version of hessianDiagonal
virtual void hessianDiagonal(double* d) const;
/// Return the block diagonal of the Hessian for this factor
@@ -380,13 +380,10 @@ namespace gtsam {
/** y += alpha * A'*A*x */
void multiplyHessianAdd(double alpha, const VectorValues& x, VectorValues& y) const;
- void multiplyHessianAdd(double alpha, const double* x, double* y, std::vector<size_t> keys) const;
-
- void multiplyHessianAdd(double alpha, const double* x, double* y) const {};
-
/// eta for Hessian
VectorValues gradientAtZero() const;
+ /// Raw memory access version of gradientAtZero
virtual void gradientAtZero(double* d) const;
/**
diff --git a/gtsam/linear/JacobianFactor.cpp b/gtsam/linear/JacobianFactor.cpp
index 566a98fc2..eba06f99a 100644
--- a/gtsam/linear/JacobianFactor.cpp
+++ b/gtsam/linear/JacobianFactor.cpp
@@ -461,22 +461,9 @@ VectorValues JacobianFactor::hessianDiagonal() const {
}
/* ************************************************************************* */
-// TODO: currently assumes all variables of the same size 9 and keys arranged from 0 to n
+// Raw memory access version, currently supported only by Regular (fixed-size) factors
void JacobianFactor::hessianDiagonal(double* d) const {
-
- // Use eigen magic to access raw memory
- typedef Eigen::Matrix<double, 9, 1> DVector;
- typedef Eigen::Map<DVector> DMap;
-
- // Loop over all variables in the factor
- for (DenseIndex j = 0; j < (DenseIndex) size(); ++j) {
- // Get the diagonal block, and insert its diagonal
- DVector dj;
- for (size_t k = 0; k < 9; ++k)
- dj(k) = Ab_(j).col(k).squaredNorm();
-
- DMap(d + 9 * j) += dj;
- }
+ throw std::runtime_error("JacobianFactor::hessianDiagonal raw memory access is allowed for Regular Factors only");
}
/* ************************************************************************* */
@@ -528,40 +515,6 @@ void JacobianFactor::multiplyHessianAdd(double alpha, const VectorValues& x,
transposeMultiplyAdd(alpha, Ax, y);
}
-void JacobianFactor::multiplyHessianAdd(double alpha, const double* x,
- double* y, std::vector<size_t> keys) const {
-
- // Use eigen magic to access raw memory
- typedef Eigen::Matrix<double, Eigen::Dynamic, 1> DVector;
- typedef Eigen::Map<DVector> DMap;
- typedef Eigen::Map<const DVector> ConstDMap;
-
- if (empty())
- return;
- Vector Ax = zero(Ab_.rows());
-
- // Just iterate over all A matrices and multiply in correct config part
- for (size_t pos = 0; pos < size(); ++pos)
- Ax += Ab_(pos)
- * ConstDMap(x + keys[keys_[pos]],
- keys[keys_[pos] + 1] - keys[keys_[pos]]);
-
- // Deal with noise properly, need to Double* whiten as we are dividing by variance
- if (model_) {
- model_->whitenInPlace(Ax);
- model_->whitenInPlace(Ax);
- }
-
- // multiply with alpha
- Ax *= alpha;
-
- // Again iterate over all A matrices and insert Ai^e into y
- for (size_t pos = 0; pos < size(); ++pos)
- DMap(y + keys[keys_[pos]], keys[keys_[pos] + 1] - keys[keys_[pos]]) += Ab_(
- pos).transpose() * Ax;
-
-}
-
/* ************************************************************************* */
VectorValues JacobianFactor::gradientAtZero() const {
VectorValues g;
@@ -574,8 +527,9 @@ VectorValues JacobianFactor::gradientAtZero() const {
}
/* ************************************************************************* */
+// Raw memory access version, currently supported only by Regular (fixed-size) factors
void JacobianFactor::gradientAtZero(double* d) const {
- throw std::runtime_error("Raw memory version of gradientAtZero not implemented for Jacobian factor");
+ throw std::runtime_error("JacobianFactor::gradientAtZero raw memory access is allowed for Regular Factors only");
}
/* ************************************************************************* */
diff --git a/gtsam/linear/JacobianFactor.h b/gtsam/linear/JacobianFactor.h
index 4b44197c6..02ae21566 100644
--- a/gtsam/linear/JacobianFactor.h
+++ b/gtsam/linear/JacobianFactor.h
@@ -283,10 +283,6 @@ namespace gtsam {
/** y += alpha * A'*A*x */
void multiplyHessianAdd(double alpha, const VectorValues& x, VectorValues& y) const;
- void multiplyHessianAdd(double alpha, const double* x, double* y, std::vector<size_t> keys) const;
-
- void multiplyHessianAdd(double alpha, const double* x, double* y) const {};
-
/// A'*b for Jacobian
VectorValues gradientAtZero() const;
diff --git a/gtsam/linear/Preconditioner.cpp b/gtsam/linear/Preconditioner.cpp
index 4c3006a3f..538d886b4 100644
--- a/gtsam/linear/Preconditioner.cpp
+++ b/gtsam/linear/Preconditioner.cpp
@@ -126,7 +126,9 @@ void BlockJacobiPreconditioner::transposeSolve(const Vector& y, Vector& x) const
void BlockJacobiPreconditioner::build(
const GaussianFactorGraph &gfg, const KeyInfo &keyInfo, const std::map<Key,Vector> &lambda)
{
+ // n is the number of keys
const size_t n = keyInfo.size();
+ // dims_ contains the dimension of each variable, in key order
dims_ = keyInfo.colSpec();
/* prepare the buffer of block diagonals */
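For context on the two comments added above: `keyInfo.colSpec()` yields the per-variable column dimensions, and the block-Jacobi buffer stores one dense d_i x d_i diagonal block per key. An illustrative sketch of the buffer sizing (not GTSAM API, just the arithmetic):

```cpp
#include <cstddef>
#include <vector>

// Total storage for a block-diagonal buffer whose i-th block is dims[i] x dims[i].
std::size_t blockDiagonalBufferSize(const std::vector<std::size_t>& dims) {
  std::size_t total = 0;
  for (std::size_t i = 0; i < dims.size(); ++i)
    total += dims[i] * dims[i];  // one square block per key
  return total;
}
```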
diff --git a/gtsam/linear/tests/testGaussianFactorGraph.cpp b/gtsam/linear/tests/testGaussianFactorGraph.cpp
index d87d842de..7404caa14 100644
--- a/gtsam/linear/tests/testGaussianFactorGraph.cpp
+++ b/gtsam/linear/tests/testGaussianFactorGraph.cpp
@@ -315,30 +315,6 @@ TEST( GaussianFactorGraph, multiplyHessianAdd2 )
EXPECT(assert_equal(2*expected, actual));
}
-/* ************************************************************************* */
-TEST( GaussianFactorGraph, multiplyHessianAdd3 )
-{
- GaussianFactorGraph gfg = createGaussianFactorGraphWithHessianFactor();
-
- // brute force
- Matrix AtA; Vector eta; boost::tie(AtA,eta) = gfg.hessian();
- Vector X(6); X<<1,2,3,4,5,6;
- Vector Y(6); Y<<-450, -450, 300, 400, 2950, 3450;
- EXPECT(assert_equal(Y,AtA*X));
-
- double* x = &X[0];
-
- Vector fast_y = gtsam::zero(6);
- gfg.multiplyHessianAdd(1.0, x, fast_y.data());
- EXPECT(assert_equal(Y, fast_y));
-
- // now, do it with non-zero y
- gfg.multiplyHessianAdd(1.0, x, fast_y.data());
- EXPECT(assert_equal(2*Y, fast_y));
-
-}
-
-
/* ************************************************************************* */
TEST( GaussianFactorGraph, matricesMixed )
{
@@ -351,7 +327,6 @@ TEST( GaussianFactorGraph, matricesMixed )
EXPECT(assert_equal(A.transpose()*b, eta));
}
-
/* ************************************************************************* */
TEST( GaussianFactorGraph, gradientAtZero )
{
diff --git a/gtsam/slam/RegularHessianFactor.h b/gtsam/slam/RegularHessianFactor.h
index 0e20bb709..f3f90dc2d 100644
--- a/gtsam/slam/RegularHessianFactor.h
+++ b/gtsam/slam/RegularHessianFactor.h
@@ -51,18 +51,12 @@ public:
HessianFactor(keys, augmentedInformation) {
}
- /** y += alpha * A'*A*x */
- void multiplyHessianAdd(double alpha, const VectorValues& x,
- VectorValues& y) const {
- HessianFactor::multiplyHessianAdd(alpha, x, y);
- }
-
// Scratch space for multiplyHessianAdd
typedef Eigen::Matrix<double, D, 1> DVector;
mutable std::vector<DVector> y;
- void multiplyHessianAdd(double alpha, const double* x,
- double* yvalues) const {
+ /** y += alpha * A'*A*x */
+ void multiplyHessianAdd(double alpha, const double* x, double* yvalues) const {
// Create a vector of temporary y values, corresponding to rows i
y.resize(size());
BOOST_FOREACH(DVector & yi, y)
@@ -95,6 +89,83 @@ public:
}
}
+ void multiplyHessianAdd(double alpha, const double* x, double* yvalues,
+ std::vector<size_t> offsets) const {
+
+ // Use eigen magic to access raw memory
+ typedef Eigen::Matrix<double, Eigen::Dynamic, 1> DVector;
+ typedef Eigen::Map<DVector> DMap;
+ typedef Eigen::Map<const DVector> ConstDMap;
+
+ // Create a vector of temporary y values, corresponding to rows i
+ std::vector<Vector> y;
+ y.reserve(size());
+ for (const_iterator it = begin(); it != end(); it++)
+ y.push_back(zero(getDim(it)));
+
+ // Accessing the VectorValues one by one is expensive
+ // So we will loop over columns to access x only once per column
+ // And fill the above temporary y values, to be added into yvalues after
+ for (DenseIndex j = 0; j < (DenseIndex) size(); ++j) {
+ DenseIndex i = 0;
+ for (; i < j; ++i)
+ y[i] += info_(i, j).knownOffDiagonal()
+ * ConstDMap(x + offsets[keys_[j]],
+ offsets[keys_[j] + 1] - offsets[keys_[j]]);
+ // blocks on the diagonal are only half
+ y[i] += info_(j, j).selfadjointView()
+ * ConstDMap(x + offsets[keys_[j]],
+ offsets[keys_[j] + 1] - offsets[keys_[j]]);
+ // for below diagonal, we take transpose block from upper triangular part
+ for (i = j + 1; i < (DenseIndex) size(); ++i)
+ y[i] += info_(i, j).knownOffDiagonal()
+ * ConstDMap(x + offsets[keys_[j]],
+ offsets[keys_[j] + 1] - offsets[keys_[j]]);
+ }
+
+ // copy to yvalues
+ for (DenseIndex i = 0; i < (DenseIndex) size(); ++i)
+ DMap(yvalues + offsets[keys_[i]], offsets[keys_[i] + 1] - offsets[keys_[i]]) +=
+ alpha * y[i];
+ }
+
+ /** Return the diagonal of the Hessian for this factor (raw memory version) */
+ virtual void hessianDiagonal(double* d) const {
+
+ // Use eigen magic to access raw memory
+ //typedef Eigen::Matrix<double, 9, 1> DVector;
+ typedef Eigen::Matrix<double, D, 1> DVector;
+ typedef Eigen::Map<DVector> DMap;
+
+ // Loop over all variables in the factor
+ for (DenseIndex pos = 0; pos < (DenseIndex)size(); ++pos) {
+ Key j = keys_[pos];
+ // Get the diagonal block, and insert its diagonal
+ const Matrix& B = info_(pos, pos).selfadjointView();
+ //DMap(d + 9 * j) += B.diagonal();
+ DMap(d + D * j) += B.diagonal();
+ }
+ }
+
+ /* ************************************************************************* */
+ // TODO: currently assumes all variables of the same size D (templated) and keys arranged from 0 to n
+ virtual void gradientAtZero(double* d) const {
+
+ // Use eigen magic to access raw memory
+ //typedef Eigen::Matrix<double, 9, 1> DVector;
+ typedef Eigen::Matrix<double, D, 1> DVector;
+ typedef Eigen::Map<DVector> DMap;
+
+ // Loop over all variables in the factor
+ for (DenseIndex pos = 0; pos < (DenseIndex)size(); ++pos) {
+ Key j = keys_[pos];
+ // Get the diagonal block, and insert its diagonal
+ DVector dj = -info_(pos,size()).knownOffDiagonal();
+ //DMap(d + 9 * j) += dj;
+ DMap(d + D * j) += dj;
+ }
+ }
+
};
}
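The `offsets` vector consumed by the variable-dimension overload above is the exclusive prefix sum of the variable dimensions, with one trailing entry (size() + 1 entries total, as the RegularJacobianFactor comment below also notes). A small sketch of building it (the helper name is ours):

```cpp
#include <cstddef>
#include <vector>

// Hypothetical helper: dims {3, 6, 2} -> offsets {0, 3, 9, 11}; block j then
// occupies [offsets[j], offsets[j+1]) in the flat x/y arrays.
std::vector<std::size_t> accumulateDims(const std::vector<std::size_t>& dims) {
  std::vector<std::size_t> offsets(1, 0);  // leading zero
  for (std::size_t i = 0; i < dims.size(); ++i)
    offsets.push_back(offsets.back() + dims[i]);
  return offsets;
}
```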
diff --git a/gtsam/slam/RegularImplicitSchurFactor.h b/gtsam/slam/RegularImplicitSchurFactor.h
index 243331dc1..75b2d44ba 100644
--- a/gtsam/slam/RegularImplicitSchurFactor.h
+++ b/gtsam/slam/RegularImplicitSchurFactor.h
@@ -159,7 +159,7 @@ public:
* @brief add the contribution of this factor to the diagonal of the hessian
* d(output) = d(input) + deltaHessianFactor
*/
- void hessianDiagonal(double* d) const {
+ virtual void hessianDiagonal(double* d) const {
// diag(Hessian) = diag(F' * (I - E * PointCov * E') * F);
// Use eigen magic to access raw memory
typedef Eigen::Matrix<double, D, 1> DVector;
@@ -434,7 +434,7 @@ public:
/**
* Calculate gradient, which is -F'Q*b, see paper - RAW MEMORY ACCESS
*/
- void gradientAtZero(double* d) const {
+ virtual void gradientAtZero(double* d) const {
// Use eigen magic to access raw memory
typedef Eigen::Matrix<double, D, 1> DVector;
diff --git a/gtsam/slam/RegularJacobianFactor.h b/gtsam/slam/RegularJacobianFactor.h
new file mode 100644
index 000000000..bb275ef3f
--- /dev/null
+++ b/gtsam/slam/RegularJacobianFactor.h
@@ -0,0 +1,174 @@
+/* ----------------------------------------------------------------------------
+
+ * GTSAM Copyright 2010, Georgia Tech Research Corporation,
+ * Atlanta, Georgia 30332-0415
+ * All Rights Reserved
+ * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
+
+ * See LICENSE for the license information
+
+ * -------------------------------------------------------------------------- */
+
+/**
+ * @file RegularJacobianFactor.h
+ * @brief JacobianFactor class with fixed-size blocks
+ * @author Sungtae An
+ * @date Nov 11, 2014
+ */
+
+#pragma once
+
+#include <gtsam/linear/JacobianFactor.h>
+#include <gtsam/linear/NoiseModel.h>
+#include <gtsam/linear/VectorValues.h>
+#include <vector>
+
+namespace gtsam {
+
+template<size_t D>
+class RegularJacobianFactor: public JacobianFactor {
+
+private:
+
+ /** Use eigen magic to access raw memory */
+ typedef Eigen::Matrix<double, D, 1> DVector;
+ typedef Eigen::Map<DVector> DMap;
+ typedef Eigen::Map<const DVector> ConstDMap;
+
+public:
+
+ /** Construct an n-ary factor
+ * @tparam TERMS A container whose value type is std::pair<Key, Matrix>, specifying the
+ * collection of keys and matrices making up the factor. */
+ template<typename TERMS>
+ RegularJacobianFactor(const TERMS& terms, const Vector& b,
+ const SharedDiagonal& model = SharedDiagonal()) :
+ JacobianFactor(terms, b, model) {
+ }
+
+ /** Constructor with arbitrary number keys, and where the augmented matrix is given all together
+ * instead of in block terms. Note that only the active view of the provided augmented matrix
+ * is used, and that the matrix data is copied into a newly-allocated matrix in the constructed
+ * factor. */
+ template<typename KEYS>
+ RegularJacobianFactor(const KEYS& keys,
+ const VerticalBlockMatrix& augmentedMatrix,
+ const SharedDiagonal& sigmas = SharedDiagonal()) :
+ JacobianFactor(keys, augmentedMatrix, sigmas) {
+ }
+
+ /** Raw memory access version of multiplyHessianAdd y += alpha * A'*A*x
+ * Note: this is not assuming a fixed dimension for the variables,
+ * but requires the vector accumulatedDims to tell the dimension of
+ * each variable: e.g.: x0 has dim 3, x2 has dim 6, x3 has dim 2,
+ * then accumulatedDims is [0 3 9 11 13]
+ * NOTE: size of accumulatedDims is size of keys + 1!! */
+ void multiplyHessianAdd(double alpha, const double* x, double* y,
+ const std::vector<size_t>& accumulatedDims) const {
+
+ /// Use eigen magic to access raw memory
+ typedef Eigen::Matrix<double, Eigen::Dynamic, 1> VectorD;
+ typedef Eigen::Map<VectorD> MapD;
+ typedef Eigen::Map<const VectorD> ConstMapD;
+
+ if (empty())
+ return;
+ Vector Ax = zero(Ab_.rows());
+
+ /// Just iterate over all A matrices and multiply in correct config part (looping over keys)
+ /// E.g.: Jacobian A = [A0 A1 A2] multiplies x = [x0 x1 x2]'
+ /// Hence: Ax = A0 x0 + A1 x1 + A2 x2 (hence we loop over the keys and accumulate)
+ for (size_t pos = 0; pos < size(); ++pos)
+ {
+ Ax += Ab_(pos)
+ * ConstMapD(x + accumulatedDims[keys_[pos]], accumulatedDims[keys_[pos] + 1] - accumulatedDims[keys_[pos]]);
+ }
+ /// Deal with noise properly, need to Double* whiten as we are dividing by variance
+ if (model_) {
+ model_->whitenInPlace(Ax);
+ model_->whitenInPlace(Ax);
+ }
+
+ /// multiply with alpha
+ Ax *= alpha;
+
+ /// Again iterate over all A matrices and insert Ai^T into y
+ for (size_t pos = 0; pos < size(); ++pos){
+ MapD(y + accumulatedDims[keys_[pos]], accumulatedDims[keys_[pos] + 1] - accumulatedDims[keys_[pos]]) += Ab_(
+ pos).transpose() * Ax;
+ }
+ }
+
+ /** Raw memory access version of multiplyHessianAdd */
+ void multiplyHessianAdd(double alpha, const double* x, double* y) const {
+
+ if (empty()) return;
+ Vector Ax = zero(Ab_.rows());
+
+ // Just iterate over all A matrices and multiply in correct config part
+ for (size_t pos = 0; pos < size(); ++pos)
+ Ax += Ab_(pos) * ConstDMap(x + D * keys_[pos]);
+
+ // Deal with noise properly, need to Double* whiten as we are dividing by variance
+ if (model_) { model_->whitenInPlace(Ax); model_->whitenInPlace(Ax); }
+
+ // multiply with alpha
+ Ax *= alpha;
+
+ // Again iterate over all A matrices and insert Ai^T * Ax into y
+ for (size_t pos = 0; pos < size(); ++pos)
+ DMap(y + D * keys_[pos]) += Ab_(pos).transpose() * Ax;
+ }
+
+ /** Raw memory access version of hessianDiagonal
+ * TODO: currently assumes all variables of the same size D (templated) and keys arranged from 0 to n
+ */
+ virtual void hessianDiagonal(double* d) const {
+
+ // Loop over all variables in the factor
+ for (DenseIndex j = 0; j < (DenseIndex) size(); ++j) {
+ // Get the diagonal block, and insert its diagonal
+ DVector dj;
+ for (size_t k = 0; k < D; ++k) {
+ if (model_) {
+ // Whiten the column before taking the squared norm
+ Vector column_k = Ab_(j).col(k);
+ column_k = model_->whiten(column_k);
+ dj(k) = dot(column_k, column_k);
+ } else {
+ dj(k) = Ab_(j).col(k).squaredNorm();
+ }
+ }
+ DMap(d + D * j) += dj;
+ }
+ }
+
+ /** Raw memory access version of gradientAtZero
+ * TODO: currently assumes all variables of the same size D (templated) and keys arranged from 0 to n
+ */
+ virtual void gradientAtZero(double* d) const {
+
+ // Get vector b not weighted
+ Vector b = getb();
+
+ // Whitening b
+ if (model_) {
+ b = model_->whiten(b);
+ b = model_->whiten(b);
+ }
+
+ // Just iterate over all A matrices
+ for (DenseIndex j = 0; j < (DenseIndex)size(); ++j) {
+ DVector dj;
+ // gradient -= A'*b/sigma^2
+ // Computing with each column
+ for (size_t k = 0; k < D; ++k){
+ Vector column_k = Ab_(j).col(k);
+ dj(k) = -1.0*dot(b, column_k);
+ }
+ DMap(d + D * j) += dj;
+ }
+ }
+
+};
+
+}
+
diff --git a/gtsam/slam/tests/testRegularHessianFactor.cpp b/gtsam/slam/tests/testRegularHessianFactor.cpp
new file mode 100644
index 000000000..8dfb753f4
--- /dev/null
+++ b/gtsam/slam/tests/testRegularHessianFactor.cpp
@@ -0,0 +1,100 @@
+/* ----------------------------------------------------------------------------
+
+ * GTSAM Copyright 2010, Georgia Tech Research Corporation,
+ * Atlanta, Georgia 30332-0415
+ * All Rights Reserved
+ * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
+
+ * See LICENSE for the license information
+
+ * -------------------------------------------------------------------------- */
+
+/**
+ * @file testRegularHessianFactor.cpp
+ * @author Frank Dellaert
+ * @date March 4, 2014
+ */
+
+#include <gtsam/slam/RegularHessianFactor.h>
+#include <gtsam/base/TestableAssertions.h>
+
+//#include <boost/assign/std/map.hpp>
+#include <boost/assign/list_of.hpp>
+
+#include <CppUnitLite/TestHarness.h>
+#include <map>
+#include <vector>
+
+using namespace std;
+using namespace gtsam;
+using namespace boost::assign;
+
+const double tol = 1e-5;
+
+/* ************************************************************************* */
+TEST(RegularHessianFactor, ConstructorNWay)
+{
+ Matrix G11 = (Matrix(2,2) << 111, 112, 113, 114).finished();
+ Matrix G12 = (Matrix(2,2) << 121, 122, 123, 124).finished();
+ Matrix G13 = (Matrix(2,2) << 131, 132, 133, 134).finished();
+
+ Matrix G22 = (Matrix(2,2) << 221, 222, 222, 224).finished();
+ Matrix G23 = (Matrix(2,2) << 231, 232, 233, 234).finished();
+
+ Matrix G33 = (Matrix(2,2) << 331, 332, 332, 334).finished();
+
+ Vector g1 = (Vector(2) << -7, -9).finished();
+ Vector g2 = (Vector(2) << -9, 1).finished();
+ Vector g3 = (Vector(2) << 2, 3).finished();
+
+ double f = 10;
+
+ std::vector<Key> js;
+ js.push_back(0); js.push_back(1); js.push_back(3);
+ std::vector<Matrix> Gs;
+ Gs.push_back(G11); Gs.push_back(G12); Gs.push_back(G13); Gs.push_back(G22); Gs.push_back(G23); Gs.push_back(G33);
+ std::vector<Vector> gs;
+ gs.push_back(g1); gs.push_back(g2); gs.push_back(g3);
+ RegularHessianFactor<2> factor(js, Gs, gs, f);
+
+ // multiplyHessianAdd:
+ {
+ // brute force
+ Matrix AtA = factor.information();
+ HessianFactor::const_iterator i1 = factor.begin();
+ HessianFactor::const_iterator i2 = i1 + 1;
+ Vector X(6); X << 1,2,3,4,5,6;
+ Vector Y(6); Y << 2633, 2674, 4465, 4501, 5669, 5696;
+ EXPECT(assert_equal(Y,AtA*X));
+
+ VectorValues x = map_list_of
+ (0, (Vector(2) << 1,2).finished())
+ (1, (Vector(2) << 3,4).finished())
+ (3, (Vector(2) << 5,6).finished());
+
+ VectorValues expected;
+ expected.insert(0, Y.segment<2>(0));
+ expected.insert(1, Y.segment<2>(2));
+ expected.insert(3, Y.segment<2>(4));
+
+ // RAW ACCESS
+ Vector expected_y(8); expected_y << 2633, 2674, 4465, 4501, 0, 0, 5669, 5696;
+ Vector fast_y = gtsam::zero(8);
+ double xvalues[8] = {1,2,3,4,0,0,5,6};
+ factor.multiplyHessianAdd(1, xvalues, fast_y.data());
+ EXPECT(assert_equal(expected_y, fast_y));
+
+ // now, do it with non-zero y
+ factor.multiplyHessianAdd(1, xvalues, fast_y.data());
+ EXPECT(assert_equal(2*expected_y, fast_y));
+
+ // check some expressions
+ EXPECT(assert_equal(G12,factor.info(i1,i2).knownOffDiagonal()));
+ EXPECT(assert_equal(G22,factor.info(i2,i2).selfadjointView()));
+ EXPECT(assert_equal((Matrix)G12.transpose(),factor.info(i2,i1).knownOffDiagonal()));
+ }
+}
+
+/* ************************************************************************* */
+int main() { TestResult tr; return TestRegistry::runAllTests(tr);}
+/* ************************************************************************* */
diff --git a/gtsam/slam/tests/testRegularJacobianFactor.cpp b/gtsam/slam/tests/testRegularJacobianFactor.cpp
new file mode 100644
index 000000000..b56b65d7b
--- /dev/null
+++ b/gtsam/slam/tests/testRegularJacobianFactor.cpp
@@ -0,0 +1,224 @@
+/**
+ * @file testRegularJacobianFactor.cpp
+ * @brief unit test regular jacobian factors
+ * @author Sungtae An
+ * @date Nov 12, 2014
+ */
+
+#include <CppUnitLite/TestHarness.h>
+#include <gtsam/base/TestableAssertions.h>
+
+#include <gtsam/slam/RegularJacobianFactor.h>
+#include <gtsam/linear/JacobianFactor.h>
+#include <gtsam/linear/VectorValues.h>
+#include <gtsam/linear/NoiseModel.h>
+
+#include <boost/assign/list_of.hpp>
+#include <boost/assign/std/vector.hpp>
+#include <vector>
+#include <map>
+
+using namespace std;
+using namespace gtsam;
+using namespace boost::assign;
+
+static const size_t fixedDim = 3;
+static const size_t nrKeys = 3;
+
+// Keys are assumed to be from 0 to n
+namespace {
+ namespace simple {
+ // Terms we'll use
+ const vector<pair<Key, Matrix> > terms = list_of<pair<Key, Matrix> >
+ (make_pair(0, Matrix3::Identity()))
+ (make_pair(1, 2*Matrix3::Identity()))
+ (make_pair(2, 3*Matrix3::Identity()));
+
+ // RHS and sigmas
+ const Vector b = (Vector(3) << 1., 2., 3.).finished();
+ const SharedDiagonal noise = noiseModel::Diagonal::Sigmas((Vector(3) << 0.5,0.5,0.5).finished());
+ }
+
+ namespace simple2 {
+ // Terms
+ const vector<pair<Key, Matrix> > terms2 = list_of<pair<Key, Matrix> >
+ (make_pair(0, 2*Matrix3::Identity()))
+ (make_pair(1, 4*Matrix3::Identity()))
+ (make_pair(2, 6*Matrix3::Identity()));
+
+ // RHS
+ const Vector b2 = (Vector(3) << 2., 4., 6.).finished();
+ }
+}
+
+/* ************************************************************************* */
+// Convert from double* to VectorValues
+VectorValues double2vv(const double* x,
+ const size_t nrKeys, const size_t dim) {
+ // create map with dimensions
+ std::map<Key, size_t> dims;
+ for (size_t i = 0; i < nrKeys; i++)
+ dims.insert(make_pair(i, dim));
+
+ size_t n = nrKeys*dim;
+ Vector xVec(n);
+ for (size_t i = 0; i < n; i++){
+ xVec(i) = x[i];
+ }
+ return VectorValues(xVec, dims);
+}
+
+/* ************************************************************************* */
+void vv2double(const VectorValues& vv, double* y,
+ const size_t nrKeys, const size_t dim) {
+ // create map with dimensions
+ std::map<Key, size_t> dims;
+ for (size_t i = 0; i < nrKeys; i++)
+ dims.insert(make_pair(i, dim));
+
+ Vector yvector = vv.vector(dims);
+ size_t n = nrKeys*dim;
+ for (size_t j = 0; j < n; j++)
+ y[j] = yvector[j];
+}
+
+/* ************************************************************************* */
+TEST(RegularJacobianFactor, constructorNway)
+{
+ using namespace simple;
+ JacobianFactor factor(terms[0].first, terms[0].second,
+ terms[1].first, terms[1].second, terms[2].first, terms[2].second, b, noise);
+ RegularJacobianFactor<fixedDim> regularFactor(terms, b, noise);
+
+ LONGS_EQUAL((long)terms[2].first, (long)regularFactor.keys().back());
+ EXPECT(assert_equal(terms[2].second, regularFactor.getA(regularFactor.end() - 1)));
+ EXPECT(assert_equal(b, factor.getb()));
+ EXPECT(assert_equal(b, regularFactor.getb()));
+ EXPECT(noise == factor.get_model());
+ EXPECT(noise == regularFactor.get_model());
+}
+
+/* ************************************************************************* */
+TEST(RegularJacobianFactor, hessianDiagonal)
+{
+ using namespace simple;
+ JacobianFactor factor(terms[0].first, terms[0].second,
+ terms[1].first, terms[1].second, terms[2].first, terms[2].second, b, noise);
+ RegularJacobianFactor<fixedDim> regularFactor(terms, b, noise);
+
+ // we compute hessian diagonal from the standard Jacobian
+ VectorValues expectedHessianDiagonal = factor.hessianDiagonal();
+
+ // we compare against the Raw memory access implementation of hessianDiagonal
+ double actualValue[9]={0};
+ regularFactor.hessianDiagonal(actualValue);
+ VectorValues actualHessianDiagonalRaw = double2vv(actualValue,nrKeys,fixedDim);
+ EXPECT(assert_equal(expectedHessianDiagonal, actualHessianDiagonalRaw));
+}
+
+/* ************************************************************************* */
+TEST(RegularJacobian, gradientAtZero)
+{
+ using namespace simple;
+ JacobianFactor factor(terms[0].first, terms[0].second,
+ terms[1].first, terms[1].second, terms[2].first, terms[2].second, b, noise);
+ RegularJacobianFactor<fixedDim> regularFactor(terms, b, noise);
+
+ // we compute gradient at zero from the standard Jacobian
+ VectorValues expectedGradientAtZero = factor.gradientAtZero();
+
+ //EXPECT(assert_equal(expectedGradientAtZero, regularFactor.gradientAtZero()));
+
+ // we compare against the Raw memory access implementation of gradientAtZero
+ double actualValue[9]={0};
+ regularFactor.gradientAtZero(actualValue);
+ VectorValues actualGradientAtZeroRaw = double2vv(actualValue,nrKeys,fixedDim);
+ EXPECT(assert_equal(expectedGradientAtZero, actualGradientAtZeroRaw));
+}
+
+/* ************************************************************************* */
+TEST(RegularJacobian, gradientAtZero_multiFactors)
+{
+ using namespace simple;
+ JacobianFactor factor(terms[0].first, terms[0].second,
+ terms[1].first, terms[1].second, terms[2].first, terms[2].second, b, noise);
+ RegularJacobianFactor<fixedDim> regularFactor(terms, b, noise);
+
+ // we compute gradient at zero from the standard Jacobian
+ VectorValues expectedGradientAtZero = factor.gradientAtZero();
+
+ // we compare against the Raw memory access implementation of gradientAtZero
+ double actualValue[9]={0};
+ regularFactor.gradientAtZero(actualValue);
+ VectorValues actualGradientAtZeroRaw = double2vv(actualValue,nrKeys,fixedDim);
+ EXPECT(assert_equal(expectedGradientAtZero, actualGradientAtZeroRaw));
+
+ // One more factor
+ using namespace simple2;
+ JacobianFactor factor2(terms2[0].first, terms2[0].second,
+ terms2[1].first, terms2[1].second, terms2[2].first, terms2[2].second, b2, noise);
+ RegularJacobianFactor<fixedDim> regularFactor2(terms2, b2, noise);
+
+ // we accumulate computed gradient at zero from the standard Jacobian
+ VectorValues expectedGradientAtZero2 = expectedGradientAtZero.add(factor2.gradientAtZero());
+
+ // we compare against the Raw memory access implementation of gradientAtZero
+ regularFactor2.gradientAtZero(actualValue);
+ VectorValues actualGradientAtZeroRaw2 = double2vv(actualValue,nrKeys,fixedDim);
+ EXPECT(assert_equal(expectedGradientAtZero2, actualGradientAtZeroRaw2));
+
+}
+
+/* ************************************************************************* */
+TEST(RegularJacobian, multiplyHessianAdd)
+{
+ using namespace simple;
+ JacobianFactor factor(terms[0].first, terms[0].second,
+ terms[1].first, terms[1].second, terms[2].first, terms[2].second, b, noise);
+ RegularJacobianFactor<fixedDim> regularFactor(terms, b, noise);
+
+ // arbitrary vector X
+ VectorValues X;
+ X.insert(0, (Vector(3) << 10.,20.,30.).finished());
+ X.insert(1, (Vector(3) << 10.,20.,30.).finished());
+ X.insert(2, (Vector(3) << 10.,20.,30.).finished());
+
+ // arbitrary vector Y
+ VectorValues Y;
+ Y.insert(0, (Vector(3) << 10.,10.,10.).finished());
+ Y.insert(1, (Vector(3) << 20.,20.,20.).finished());
+ Y.insert(2, (Vector(3) << 30.,30.,30.).finished());
+
+ // multiplyHessianAdd Y += alpha*A'A*X
+ double alpha = 2.0;
+ VectorValues expectedMHA = Y;
+ factor.multiplyHessianAdd(alpha, X, expectedMHA);
+
+ // create data for raw memory access
+ double XRaw[9];
+ vv2double(X, XRaw, nrKeys, fixedDim);
+
+ // test 1st version: multiplyHessianAdd(double alpha, const double* x, double* y)
+ double actualMHARaw[9];
+ vv2double(Y, actualMHARaw, nrKeys, fixedDim);
+ regularFactor.multiplyHessianAdd(alpha, XRaw, actualMHARaw);
+ VectorValues actualMHARawVV = double2vv(actualMHARaw,nrKeys,fixedDim);
+ EXPECT(assert_equal(expectedMHA,actualMHARawVV));
+
+ // test 2nd version: multiplyHessianAdd(double alpha, const double* x, double* y, std::vector<size_t> keys)
+ double actualMHARaw2[9];
+ vv2double(Y, actualMHARaw2, nrKeys, fixedDim);
+ vector<size_t> dims;
+ size_t accumulatedDim = 0;
+ for (size_t i = 0; i < nrKeys+1; i++){
+ dims.push_back(accumulatedDim);
+ accumulatedDim += fixedDim;
+ }
+ regularFactor.multiplyHessianAdd(alpha, XRaw, actualMHARaw2, dims);
+ VectorValues actualMHARawVV2 = double2vv(actualMHARaw2,nrKeys,fixedDim);
+ EXPECT(assert_equal(expectedMHA,actualMHARawVV2));
+}
+
+/* ************************************************************************* */
+int main() { TestResult tr; return TestRegistry::runAllTests(tr);}
+/* ************************************************************************* */
diff --git a/tests/testPreconditioner.cpp b/tests/testPreconditioner.cpp
index 72bc4e8e3..89c3d5cf8 100644
--- a/tests/testPreconditioner.cpp
+++ b/tests/testPreconditioner.cpp
@@ -87,19 +87,19 @@ TEST(PCGSolver, simpleLinearSystem) {
simpleGFG += JacobianFactor(0, (Matrix(2,2)<< 1, 0, 0, 1).finished(), (Vector(2) << 0, 0).finished(), unit2);
simpleGFG += JacobianFactor(1, (Matrix(2,2)<< 1, 0, 0, 1).finished(), (Vector(2) << 0, 0).finished(), unit2);
simpleGFG += JacobianFactor(2, (Matrix(2,2)<< 1, 0, 0, 1).finished(), (Vector(2) << 0, 0).finished(), unit2);
- simpleGFG.print("system");
+ //simpleGFG.print("system");
// Expected solution
VectorValues expectedSolution;
expectedSolution.insert(0, (Vector(2) << 0.100498, -0.196756).finished());
expectedSolution.insert(2, (Vector(2) << -0.0990413, -0.0980577).finished());
expectedSolution.insert(1, (Vector(2) << -0.0973252, 0.100582).finished());
- expectedSolution.print("Expected");
+ //expectedSolution.print("Expected");
// Solve the system using direct method
VectorValues deltaDirect = simpleGFG.optimize();
EXPECT(assert_equal(expectedSolution, deltaDirect, 1e-5));
- deltaDirect.print("Direct");
+ //deltaDirect.print("Direct");
// Solve the system using Preconditioned Conjugate Gradient solver
// Common PCG parameters
@@ -107,19 +107,19 @@ TEST(PCGSolver, simpleLinearSystem) {
pcg->setMaxIterations(500);
pcg->setEpsilon_abs(0.0);
pcg->setEpsilon_rel(0.0);
- pcg->setVerbosity("ERROR");
+ //pcg->setVerbosity("ERROR");
// With Dummy preconditioner
pcg->preconditioner_ = boost::make_shared<DummyPreconditionerParameters>();
VectorValues deltaPCGDummy = PCGSolver(*pcg).optimize(simpleGFG);
EXPECT(assert_equal(expectedSolution, deltaPCGDummy, 1e-5));
- deltaPCGDummy.print("PCG Dummy");
+ //deltaPCGDummy.print("PCG Dummy");
// With Block-Jacobi preconditioner
pcg->preconditioner_ = boost::make_shared<BlockJacobiPreconditionerParameters>();
VectorValues deltaPCGJacobi = PCGSolver(*pcg).optimize(simpleGFG);
EXPECT(assert_equal(expectedSolution, deltaPCGJacobi, 1e-5));
- deltaPCGJacobi.print("PCG Jacobi");
+ //deltaPCGJacobi.print("PCG Jacobi");
}