diff --git a/gtsam/linear/SubgraphPreconditioner.cpp b/gtsam/linear/SubgraphPreconditioner.cpp
index fe8d403c8..118c5effa 100644
--- a/gtsam/linear/SubgraphPreconditioner.cpp
+++ b/gtsam/linear/SubgraphPreconditioner.cpp
@@ -15,10 +15,9 @@
  * @author: Frank Dellaert
  */
 
-#if 0
-
 #include
 #include
+#include
 #include
 
 using namespace std;
@@ -41,47 +40,47 @@ namespace gtsam {
 
   /* ************************************************************************* */
   SubgraphPreconditioner::SubgraphPreconditioner(const sharedFG& Ab2, const sharedBayesNet& Rc1, const sharedValues& xbar) :
-    Ab2_(convertToJacobianFactors(*Ab2)), Rc1_(Rc1), xbar_(xbar), b2bar_(new Errors(-gaussianErrors(*Ab2_,*xbar))) {
+    Ab2_(convertToJacobianFactors(*Ab2)), Rc1_(Rc1), xbar_(xbar), b2bar_(new Errors(-Ab2_->gaussianErrors(*xbar))) {
   }
 
   /* ************************************************************************* */
   // x = xbar + inv(R1)*y
   VectorValues SubgraphPreconditioner::x(const VectorValues& y) const {
-    return *xbar_ + gtsam::backSubstitute(*Rc1_, y);
+    return *xbar_ + Rc1_->backSubstitute(y);
   }
 
   /* ************************************************************************* */
-  double error(const SubgraphPreconditioner& sp, const VectorValues& y) {
+  double SubgraphPreconditioner::error(const VectorValues& y) const {
     Errors e(y);
-    VectorValues x = sp.x(y);
-    Errors e2 = gaussianErrors(*sp.Ab2(),x);
+    VectorValues x = this->x(y);
+    Errors e2 = Ab2()->gaussianErrors(x);
     return 0.5 * (dot(e, e) + dot(e2,e2));
   }
 
   /* ************************************************************************* */
   // gradient is y + inv(R1')*A2'*(A2*inv(R1)*y-b2bar),
-  VectorValues gradient(const SubgraphPreconditioner& sp, const VectorValues& y) {
-    VectorValues x = gtsam::backSubstitute(*sp.Rc1(), y); /* inv(R1)*y */
-    Errors e = (*sp.Ab2()*x - *sp.b2bar());               /* (A2*inv(R1)*y-b2bar) */
+  VectorValues SubgraphPreconditioner::gradient(const VectorValues& y) const {
+    VectorValues x = Rc1()->backSubstitute(y);            /* inv(R1)*y */
+    Errors e = (*Ab2()*x - *b2bar());                     /* (A2*inv(R1)*y-b2bar) */
     VectorValues v = VectorValues::Zero(x);
-    transposeMultiplyAdd(*sp.Ab2(), 1.0, e, v);           /* A2'*(A2*inv(R1)*y-b2bar) */
-    return y + gtsam::backSubstituteTranspose(*sp.Rc1(), v);
+    Ab2()->transposeMultiplyAdd(1.0, e, v);               /* A2'*(A2*inv(R1)*y-b2bar) */
+    return y + Rc1()->backSubstituteTranspose(v);
   }
 
   /* ************************************************************************* */
   // Apply operator A, A*y = [I;A2*inv(R1)]*y = [y; A2*inv(R1)*y]
-  Errors operator*(const SubgraphPreconditioner& sp, const VectorValues& y) {
+  Errors SubgraphPreconditioner::operator*(const VectorValues& y) const {
     Errors e(y);
-    VectorValues x = gtsam::backSubstitute(*sp.Rc1(), y); /* x=inv(R1)*y */
-    Errors e2 = *sp.Ab2() * x;                            /* A2*x */
+    VectorValues x = Rc1()->backSubstitute(y);            /* x=inv(R1)*y */
+    Errors e2 = *Ab2() * x;                               /* A2*x */
     e.splice(e.end(), e2);
     return e;
   }
 
   /* ************************************************************************* */
   // In-place version that overwrites e
-  void multiplyInPlace(const SubgraphPreconditioner& sp, const VectorValues& y, Errors& e) {
+  void SubgraphPreconditioner::multiplyInPlace(const VectorValues& y, Errors& e) const {
     Errors::iterator ei = e.begin();
     for ( Index i = 0 ; i < y.size() ; ++i, ++ei ) {
@@ -89,33 +88,33 @@ namespace gtsam {
     }
 
     // Add A2 contribution
-    VectorValues x = gtsam::backSubstitute(*sp.Rc1(), y);  // x=inv(R1)*y
-    gtsam::multiplyInPlace(*sp.Ab2(), x, ei);              // use iterator version
+    VectorValues x = Rc1()->backSubstitute(y);             // x=inv(R1)*y
+    Ab2()->multiplyInPlace(x, ei);                         // use iterator version
   }
 
   /* ************************************************************************* */
   // Apply operator A', A'*e = [I inv(R1')*A2']*e = e1 + inv(R1')*A2'*e2
-  VectorValues operator^(const SubgraphPreconditioner& sp, const Errors& e) {
+  VectorValues SubgraphPreconditioner::operator^(const Errors& e) const {
     Errors::const_iterator it = e.begin();
-    VectorValues y = sp.zero();
+    VectorValues y = zero();
     for ( Index i = 0 ; i < y.size() ; ++i, ++it )
       y[i] = *it ;
-    sp.transposeMultiplyAdd2(1.0,it,e.end(),y);
+    transposeMultiplyAdd2(1.0,it,e.end(),y);
    return y;
   }
 
   /* ************************************************************************* */
   // y += alpha*A'*e
-  void transposeMultiplyAdd
-  (const SubgraphPreconditioner& sp, double alpha, const Errors& e, VectorValues& y) {
+  void SubgraphPreconditioner::transposeMultiplyAdd
+  (double alpha, const Errors& e, VectorValues& y) const {
     Errors::const_iterator it = e.begin();
     for ( Index i = 0 ; i < y.size() ; ++i, ++it ) {
       const Vector& ei = *it;
       axpy(alpha, ei, y[i]);
     }
-    sp.transposeMultiplyAdd2(alpha, it, e.end(), y);
+    transposeMultiplyAdd2(alpha, it, e.end(), y);
   }
 
   /* ************************************************************************* */
@@ -129,8 +128,8 @@ namespace gtsam {
     while (it != end) e2.push_back(*(it++));
 
     VectorValues x = VectorValues::Zero(y);                    // x = 0
-    gtsam::transposeMultiplyAdd(*Ab2_,1.0,e2,x);               // x += A2'*e2
-    axpy(alpha, gtsam::backSubstituteTranspose(*Rc1_, x), y);  // y += alpha*inv(R1')*x
+    Ab2_->transposeMultiplyAdd(1.0,e2,x);                      // x += A2'*e2
+    axpy(alpha, Rc1_->backSubstituteTranspose(x), y);          // y += alpha*inv(R1')*x
   }
 
   /* ************************************************************************* */
@@ -139,5 +138,3 @@ namespace gtsam {
     Ab2_->print();
   }
 } // nsamespace gtsam
-
-#endif
diff --git a/gtsam/linear/SubgraphPreconditioner.h b/gtsam/linear/SubgraphPreconditioner.h
index bbba31829..d9d7524b6 100644
--- a/gtsam/linear/SubgraphPreconditioner.h
+++ b/gtsam/linear/SubgraphPreconditioner.h
@@ -23,8 +23,6 @@
 
 #include
 
-#if 0
-
 namespace gtsam {
 
   // Forward declarations
@@ -66,6 +64,9 @@ namespace gtsam {
      */
     SubgraphPreconditioner(const sharedFG& Ab2, const sharedBayesNet& Rc1, const sharedValues& xbar);
 
+    /** print the object */
+    void print(const std::string& s = "SubgraphPreconditioner") const;
+
     /** Access Ab2 */
     const sharedFG& Ab2() const { return Ab2_; }
 
@@ -98,31 +99,26 @@ namespace gtsam {
     void transposeMultiplyAdd2(double alpha, Errors::const_iterator begin,
         Errors::const_iterator end, VectorValues& y) const;
 
-    /** print the object */
-    void print(const std::string& s = "SubgraphPreconditioner") const;
-  };
+    /* error, given y */
+    double error(const VectorValues& y) const;
 
-  /* error, given y */
-  GTSAM_EXPORT double error(const SubgraphPreconditioner& sp, const VectorValues& y);
+    /** gradient = y + inv(R1')*A2'*(A2*inv(R1)*y-b2bar) */
+    VectorValues gradient(const VectorValues& y) const;
 
-  /** gradient = y + inv(R1')*A2'*(A2*inv(R1)*y-b2bar) */
-  GTSAM_EXPORT VectorValues gradient(const SubgraphPreconditioner& sp, const VectorValues& y);
+    /** Apply operator A */
+    Errors operator*(const VectorValues& y) const;
 
-  /** Apply operator A */
-  GTSAM_EXPORT Errors operator*(const SubgraphPreconditioner& sp, const VectorValues& y);
-
-  /** Apply operator A in place: needs e allocated already */
-  GTSAM_EXPORT void multiplyInPlace(const SubgraphPreconditioner& sp, const VectorValues& y, Errors& e);
+    /** Apply operator A in place: needs e allocated already */
+    void multiplyInPlace(const VectorValues& y, Errors& e) const;
 
     /** Apply operator A' */
-  GTSAM_EXPORT VectorValues operator^(const SubgraphPreconditioner& sp, const Errors& e);
+    VectorValues operator^(const Errors& e) const;
 
-  /**
-   * Add A'*e to y
-   * y += alpha*A'*[e1;e2] = [alpha*e1; alpha*inv(R1')*A2'*e2]
-   */
-  GTSAM_EXPORT void transposeMultiplyAdd(const SubgraphPreconditioner& sp, double alpha, const Errors& e, VectorValues& y);
+    /**
+     * Add A'*e to y
+     * y += alpha*A'*[e1;e2] = [alpha*e1; alpha*inv(R1')*A2'*e2]
+     */
+    void transposeMultiplyAdd(double alpha, const Errors& e, VectorValues& y) const;
+  };
 
 } // namespace gtsam
-
-#endif
\ No newline at end of file
diff --git a/gtsam/linear/SubgraphSolver.cpp b/gtsam/linear/SubgraphSolver.cpp
index 649994bfd..b6c2a6911 100644
--- a/gtsam/linear/SubgraphSolver.cpp
+++ b/gtsam/linear/SubgraphSolver.cpp
@@ -9,8 +9,6 @@
  * -------------------------------------------------------------------------- */
 
-#if 0
-
 #include
 #include
 #include
 
@@ -29,60 +27,64 @@ using namespace std;
 
 namespace gtsam {
 
 /**************************************************************************************************/
-SubgraphSolver::SubgraphSolver(const GaussianFactorGraph &gfg, const Parameters &parameters)
-  : parameters_(parameters)
+SubgraphSolver::SubgraphSolver(const GaussianFactorGraph &gfg, const Parameters &parameters, const Ordering& ordering)
+  : parameters_(parameters), ordering_(ordering)
 {
   initialize(gfg);
 }
 
 /**************************************************************************************************/
-SubgraphSolver::SubgraphSolver(const GaussianFactorGraph::shared_ptr &jfg, const Parameters &parameters)
-  : parameters_(parameters)
+SubgraphSolver::SubgraphSolver(const GaussianFactorGraph::shared_ptr &jfg, const Parameters &parameters, const Ordering& ordering)
+  : parameters_(parameters), ordering_(ordering)
 {
   initialize(*jfg);
 }
 
 /**************************************************************************************************/
-SubgraphSolver::SubgraphSolver(const GaussianFactorGraph &Ab1, const GaussianFactorGraph &Ab2, const Parameters &parameters)
-  : parameters_(parameters) {
+SubgraphSolver::SubgraphSolver(const GaussianFactorGraph &Ab1, const GaussianFactorGraph &Ab2, const Parameters &parameters, const Ordering& ordering)
+  : parameters_(parameters), ordering_(ordering) {
 
-  GaussianBayesNet::shared_ptr Rc1 = EliminationTree::Create(Ab1)->eliminate(&EliminateQR);
+  GaussianBayesNet::shared_ptr Rc1 = Ab1.eliminateSequential(ordering_, EliminateQR);
   initialize(Rc1, boost::make_shared(Ab2));
 }
 
 /**************************************************************************************************/
 SubgraphSolver::SubgraphSolver(const GaussianFactorGraph::shared_ptr &Ab1,
-    const GaussianFactorGraph::shared_ptr &Ab2, const Parameters &parameters)
-  : parameters_(parameters) {
+    const GaussianFactorGraph::shared_ptr &Ab2, const Parameters &parameters, const Ordering& ordering)
+  : parameters_(parameters), ordering_(ordering) {
 
-  GaussianBayesNet::shared_ptr Rc1 = EliminationTree::Create(*Ab1)->eliminate(&EliminateQR);
+  GaussianBayesNet::shared_ptr Rc1 = Ab1->eliminateSequential(ordering_, EliminateQR);
   initialize(Rc1, Ab2);
 }
 
 /**************************************************************************************************/
 SubgraphSolver::SubgraphSolver(const GaussianBayesNet::shared_ptr &Rc1, const GaussianFactorGraph &Ab2,
-    const Parameters &parameters) : parameters_(parameters)
+    const Parameters &parameters, const Ordering& ordering) : parameters_(parameters), ordering_(ordering)
 {
   initialize(Rc1, boost::make_shared(Ab2));
 }
 
 /**************************************************************************************************/
 SubgraphSolver::SubgraphSolver(const GaussianBayesNet::shared_ptr &Rc1,
-    const GaussianFactorGraph::shared_ptr &Ab2, const Parameters &parameters) : parameters_(parameters)
+    const GaussianFactorGraph::shared_ptr &Ab2, const Parameters &parameters, const Ordering& ordering) :
+parameters_(parameters), ordering_(ordering)
 {
   initialize(Rc1, Ab2);
 }
 
+/**************************************************************************************************/
 VectorValues SubgraphSolver::optimize() {
   VectorValues ybar = conjugateGradients(*pc_, pc_->zero(), parameters_);
   return pc_->x(ybar);
 }
 
+/**************************************************************************************************/
 VectorValues SubgraphSolver::optimize(const VectorValues &initial) {
   // the initial is ignored in this case ...
   return optimize();
 }
 
+/**************************************************************************************************/
 void SubgraphSolver::initialize(const GaussianFactorGraph &jfg)
 {
   GaussianFactorGraph::shared_ptr Ab1 = boost::make_shared(),
@@ -92,17 +94,19 @@ void SubgraphSolver::initialize(const GaussianFactorGraph &jfg)
 
   if (parameters_.verbosity())
     cout << "Split A into (A1) " << Ab1->size() << " and (A2) " << Ab2->size() << " factors" << endl;
 
-  GaussianBayesNet::shared_ptr Rc1 = EliminationTree::Create(*Ab1)->eliminate(&EliminateQR);
-  VectorValues::shared_ptr xbar(new VectorValues(gtsam::optimize(*Rc1)));
+  GaussianBayesNet::shared_ptr Rc1 = Ab1->eliminateSequential(ordering_, EliminateQR);
+  VectorValues::shared_ptr xbar = boost::make_shared(Rc1->optimize());
   pc_ = boost::make_shared(Ab2, Rc1, xbar);
 }
 
+/**************************************************************************************************/
 void SubgraphSolver::initialize(const GaussianBayesNet::shared_ptr &Rc1, const GaussianFactorGraph::shared_ptr &Ab2)
 {
-  VectorValues::shared_ptr xbar(new VectorValues(gtsam::optimize(*Rc1)));
+  VectorValues::shared_ptr xbar = boost::make_shared(Rc1->optimize());
   pc_ = boost::make_shared(Ab2, Rc1, xbar);
 }
 
+/**************************************************************************************************/
 boost::tuple
 SubgraphSolver::splitGraph(const GaussianFactorGraph &jfg) {
@@ -140,5 +144,3 @@ SubgraphSolver::splitGraph(const GaussianFactorGraph &jfg) {
   }
 
 } // \namespace gtsam
-
-#endif
diff --git a/gtsam/linear/SubgraphSolver.h b/gtsam/linear/SubgraphSolver.h
index 9abe381c4..acb654ba5 100644
--- a/gtsam/linear/SubgraphSolver.h
+++ b/gtsam/linear/SubgraphSolver.h
@@ -14,9 +14,16 @@
 
 #include
 #include
+#include
+
+#include
 
 namespace gtsam {
 
+// Forward declarations
+class GaussianFactorGraph;
+class GaussianBayesNet;
+
 class GTSAM_EXPORT SubgraphSolverParameters : public ConjugateGradientParameters {
 public:
   typedef ConjugateGradientParameters Base;
@@ -24,8 +31,6 @@ public:
   virtual void print() const { Base::print(); }
 };
 
-#if 0
-
 /**
  * This class implements the SPCG solver presented in Dellaert et al in IROS'10.
  *
@@ -55,20 +60,21 @@ public:
 
 protected:
   Parameters parameters_;
+  Ordering ordering_;
   SubgraphPreconditioner::shared_ptr pc_;  ///< preconditioner object
 
 public:
   /* Given a gaussian factor graph, split it into a spanning tree (A1) + others (A2) for SPCG */
-  SubgraphSolver(const GaussianFactorGraph &A, const Parameters &parameters);
-  SubgraphSolver(const GaussianFactorGraph::shared_ptr &A, const Parameters &parameters);
+  SubgraphSolver(const GaussianFactorGraph &A, const Parameters &parameters, const Ordering& ordering);
+  SubgraphSolver(const boost::shared_ptr &A, const Parameters &parameters, const Ordering& ordering);
 
   /* The user specify the subgraph part and the constraint part, may throw exception if A1 is underdetermined */
-  SubgraphSolver(const GaussianFactorGraph &Ab1, const GaussianFactorGraph &Ab2, const Parameters &parameters);
-  SubgraphSolver(const GaussianFactorGraph::shared_ptr &Ab1, const GaussianFactorGraph::shared_ptr &Ab2, const Parameters &parameters);
+  SubgraphSolver(const GaussianFactorGraph &Ab1, const GaussianFactorGraph &Ab2, const Parameters &parameters, const Ordering& ordering);
+  SubgraphSolver(const boost::shared_ptr &Ab1, const boost::shared_ptr &Ab2, const Parameters &parameters, const Ordering& ordering);
 
   /* The same as above, but the A1 is solved before */
-  SubgraphSolver(const GaussianBayesNet::shared_ptr &Rc1, const GaussianFactorGraph &Ab2, const Parameters &parameters);
-  SubgraphSolver(const GaussianBayesNet::shared_ptr &Rc1, const GaussianFactorGraph::shared_ptr &Ab2, const Parameters &parameters);
+  SubgraphSolver(const boost::shared_ptr &Rc1, const GaussianFactorGraph &Ab2, const Parameters &parameters, const Ordering& ordering);
+  SubgraphSolver(const boost::shared_ptr &Rc1, const boost::shared_ptr &Ab2, const Parameters &parameters, const Ordering& ordering);
 
   virtual ~SubgraphSolver() {}
   virtual VectorValues optimize () ;
@@ -77,12 +83,10 @@ public:
 
 protected:
   void initialize(const GaussianFactorGraph &jfg);
-  void initialize(const GaussianBayesNet::shared_ptr &Rc1, const GaussianFactorGraph::shared_ptr &Ab2);
+  void initialize(const boost::shared_ptr &Rc1, const boost::shared_ptr &Ab2);
 
-  boost::tuple
+  boost::tuple, boost::shared_ptr >
   splitGraph(const GaussianFactorGraph &gfg) ;
 };
 
-#endif
-
 } // namespace gtsam
diff --git a/gtsam/linear/iterative-inl.h b/gtsam/linear/iterative-inl.h
index 47997b44c..e61b67f73 100644
--- a/gtsam/linear/iterative-inl.h
+++ b/gtsam/linear/iterative-inl.h
@@ -45,11 +45,11 @@ namespace gtsam {
 
       // Start with g0 = A'*(A*x0-b), d0 = - g0
       // i.e., first step is in direction of negative gradient
-      g = gradient(Ab,x);
+      g = Ab.gradient(x);
       d = g;   // instead of negating gradient, alpha will be negated
 
       // init gamma and calculate threshold
-      gamma = dot(g,g) ;
+      gamma = dot(g,g);
       threshold = std::max(parameters_.epsilon_abs(), parameters_.epsilon() * parameters_.epsilon() * gamma);
 
       // Allocate and calculate A*d for first iteration
@@ -86,9 +86,9 @@ namespace gtsam {
       double alpha = takeOptimalStep(x);
 
       // update gradient (or re-calculate at reset time)
-      if (k % parameters_.reset() == 0) g = gradient(Ab,x);
+      if (k % parameters_.reset() == 0) g = Ab.gradient(x);
       // axpy(alpha, Ab ^ Ad, g); // g += alpha*(Ab^Ad)
-      else transposeMultiplyAdd(Ab, alpha, Ad, g);
+      else Ab.transposeMultiplyAdd(alpha, Ad, g);
 
       // check for convergence
       double new_gamma = dot(g, g);
@@ -105,14 +105,14 @@ namespace gtsam {
       else {
         double beta = new_gamma / gamma;
         // d = g + d*beta;
-        scal(beta, d);
+        d *= beta;
         axpy(1.0, g, d);
       }
 
       gamma = new_gamma;
 
       // In-place recalculation Ad <- A*d to avoid re-allocating Ad
-      multiplyInPlace(Ab, d, Ad);
+      Ab.multiplyInPlace(d, Ad);
 
       return false;
     }
diff --git a/gtsam/linear/iterative.cpp b/gtsam/linear/iterative.cpp
index 52652fd79..07d4de865 100644
--- a/gtsam/linear/iterative.cpp
+++ b/gtsam/linear/iterative.cpp
@@ -19,7 +19,7 @@
 #include
 #include
 #include
-#include
+#include
 #include
 #include
 
@@ -61,15 +61,15 @@ namespace gtsam {
   }
 
   /* ************************************************************************* */
-  VectorValuesOrdered steepestDescent(const GaussianFactorGraphOrdered& fg,
-      const VectorValuesOrdered& x, const ConjugateGradientParameters & parameters) {
-    return conjugateGradients(
+  VectorValues steepestDescent(const GaussianFactorGraph& fg,
+      const VectorValues& x, const ConjugateGradientParameters & parameters) {
+    return conjugateGradients(
         fg, x, parameters, true);
   }
 
-  VectorValuesOrdered conjugateGradientDescent(const GaussianFactorGraphOrdered& fg,
-      const VectorValuesOrdered& x, const ConjugateGradientParameters & parameters) {
-    return conjugateGradients(
+  VectorValues conjugateGradientDescent(const GaussianFactorGraph& fg,
+      const VectorValues& x, const ConjugateGradientParameters & parameters) {
+    return conjugateGradients(
         fg, x, parameters);
   }
diff --git a/gtsam/linear/iterative.h b/gtsam/linear/iterative.h
index cffe856de..1ba0a7423 100644
--- a/gtsam/linear/iterative.h
+++ b/gtsam/linear/iterative.h
@@ -19,7 +19,7 @@
 #pragma once
 
 #include
-#include
+#include
 #include
 
 namespace gtsam {
@@ -68,28 +68,28 @@ namespace gtsam {
      * Print with optional string
      */
     void print (const std::string& s = "System") const;
+
+    /** gradient of objective function 0.5*|Ax-b_|^2 at x = A_'*(Ax-b_) */
+    Vector gradient(const Vector& x) const {
+      return A() ^ (A() * x - b());
+    }
+
+    /** Apply operator A */
+    Vector operator*(const Vector& x) const {
+      return A() * x;
+    }
+
+    /** Apply operator A in place */
+    void multiplyInPlace(const Vector& x, Vector& e) const {
+      e = A() * x;
+    }
+
+    /** x += alpha* A'*e */
+    void transposeMultiplyAdd(double alpha, const Vector& e, Vector& x) const {
+      gtsam::transposeMultiplyAdd(alpha, A(), e, x);
+    }
   };
 
-  /** gradient of objective function 0.5*|Ax-b_|^2 at x = A_'*(Ax-b_) */
-  inline Vector gradient(const System& system, const Vector& x) {
-    return system.A() ^ (system.A() * x - system.b());
-  }
-
-  /** Apply operator A */
-  inline Vector operator*(const System& system, const Vector& x) {
-    return system.A() * x;
-  }
-
-  /** Apply operator A in place */
-  inline void multiplyInPlace(const System& system, const Vector& x, Vector& e) {
-    e = system.A() * x;
-  }
-
-  /** x += alpha* A'*e */
-  inline void transposeMultiplyAdd(const System& system, double alpha, const Vector& e, Vector& x) {
-    transposeMultiplyAdd(alpha,system.A(),e,x);
-  }
-
   /**
    * Method of steepest gradients, System version
    */
@@ -129,17 +129,17 @@ namespace gtsam {
 
   /**
   * Method of steepest gradients, Gaussian Factor Graph version
  */
-  GTSAM_EXPORT VectorValuesOrdered steepestDescent(
-      const GaussianFactorGraphOrdered& fg,
-      const VectorValuesOrdered& x,
+  GTSAM_EXPORT VectorValues steepestDescent(
+      const GaussianFactorGraph& fg,
+      const VectorValues& x,
       const ConjugateGradientParameters & parameters);
 
   /**
   * Method of conjugate gradients (CG), Gaussian Factor Graph version
  */
-  GTSAM_EXPORT VectorValuesOrdered conjugateGradientDescent(
-      const GaussianFactorGraphOrdered& fg,
-      const VectorValuesOrdered& x,
+  GTSAM_EXPORT VectorValues conjugateGradientDescent(
+      const GaussianFactorGraph& fg,
+      const VectorValues& x,
       const ConjugateGradientParameters & parameters);
diff --git a/gtsam/nonlinear/SuccessiveLinearizationOptimizer.cpp b/gtsam/nonlinear/SuccessiveLinearizationOptimizer.cpp
index 67c1c08d1..4b06bc3f7 100644
--- a/gtsam/nonlinear/SuccessiveLinearizationOptimizer.cpp
+++ b/gtsam/nonlinear/SuccessiveLinearizationOptimizer.cpp
@@ -63,16 +63,14 @@ VectorValues solveGaussianFactorGraph(const GaussianFactorGraph &gfg, const Succ
     delta = gfg.eliminateSequential(*params.ordering, params.getEliminationFunction())->optimize();
   } else if ( params.isCG() ) {
-    throw std::runtime_error("Not implemented");
-
-    //if ( !params.iterativeParams ) throw std::runtime_error("solveGaussianFactorGraph: cg parameter has to be assigned ...");
-    //if ( boost::dynamic_pointer_cast(params.iterativeParams) ) {
-    //  SubgraphSolver solver (gfg, *boost::dynamic_pointer_cast(params.iterativeParams));
-    //  delta = solver.optimize();
-    //}
-    //else {
-    //  throw std::runtime_error("solveGaussianFactorGraph: special cg parameter type is not handled in LM solver ...");
-    //}
+    if ( !params.iterativeParams ) throw std::runtime_error("solveGaussianFactorGraph: cg parameter has to be assigned ...");
+    if ( boost::dynamic_pointer_cast(params.iterativeParams) ) {
+      SubgraphSolver solver(gfg, dynamic_cast(*params.iterativeParams), *params.ordering);
+      delta = solver.optimize();
+    }
+    else {
+      throw std::runtime_error("solveGaussianFactorGraph: special cg parameter type is not handled in LM solver ...");
+    }
   } else {
     throw std::runtime_error("solveGaussianFactorGraph: Optimization parameter is invalid");
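
For context, and not part of the patch itself: a minimal sketch of how the reworked SubgraphSolver is meant to be driven now that every constructor takes an explicit Ordering. The toy 1D chain, the keys 0/1/2, and the unit noise model are illustrative assumptions only, and the GaussianFactorGraph::add overloads, noiseModel::Unit::Create, and Ordering::push_back are assumed to behave as in contemporary GTSAM releases.

#include <gtsam/linear/GaussianFactorGraph.h>
#include <gtsam/linear/NoiseModel.h>
#include <gtsam/linear/SubgraphSolver.h>
#include <gtsam/inference/Ordering.h>

using namespace gtsam;

int main() {
  // Hypothetical 1D chain x0 -- x1 -- x2: one prior and two odometry-style factors.
  GaussianFactorGraph gfg;
  SharedDiagonal unit = noiseModel::Unit::Create(1);
  gfg.add(0, Matrix::Identity(1, 1), Vector::Zero(1), unit);  // prior: x0 = 0
  gfg.add(0, -Matrix::Identity(1, 1), 1, Matrix::Identity(1, 1),
          Vector::Ones(1), unit);                             // x1 - x0 = 1
  gfg.add(1, -Matrix::Identity(1, 1), 2, Matrix::Identity(1, 1),
          Vector::Ones(1), unit);                             // x2 - x1 = 1

  // The elimination ordering is now a required constructor argument.
  Ordering ordering;
  ordering.push_back(0);
  ordering.push_back(1);
  ordering.push_back(2);

  SubgraphSolverParameters parameters;
  SubgraphSolver solver(gfg, parameters, ordering);  // splits gfg into spanning tree + constraints
  VectorValues solution = solver.optimize();         // preconditioned conjugate gradients
  solution.print("solution");
  return 0;
}

This mirrors the call site re-enabled in SuccessiveLinearizationOptimizer.cpp above, where the solver is built from the linearized graph, the SubgraphSolverParameters carried in params.iterativeParams, and *params.ordering.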