diff --git a/examples/Pose2SLAMwSPCG.cpp b/examples/Pose2SLAMwSPCG.cpp
index 984701734..ddf0a7ad0 100644
--- a/examples/Pose2SLAMwSPCG.cpp
+++ b/examples/Pose2SLAMwSPCG.cpp
@@ -16,6 +16,8 @@
  * @date June 2, 2012
  */
 
+#include <gtsam/linear/SimpleSPCGSolver.h>
+#include <gtsam/linear/SubgraphSolver.h>
 #include <gtsam/slam/pose2SLAM.h>
 #include <iostream>
@@ -61,10 +63,26 @@ int main(void) {
   cout << "initial error = " << graph.error(initialEstimate) << endl ;
 
   // 4. Single Step Optimization using Levenberg-Marquardt
-  // Note: Although there are many options in IterativeOptimizationParameters,
-  Values result = graph.optimizeSPCG(initialEstimate);
-  result.print("\nFinal result:\n");
-  cout << "final error = " << graph.error(result) << endl;
+  LevenbergMarquardtParams param;
+  param.verbosity = NonlinearOptimizerParams::ERROR;
+  param.verbosityLM = LevenbergMarquardtParams::LAMBDA;
+  param.linearSolverType = SuccessiveLinearizationParams::CG;
+
+  {
+    param.iterativeParams = boost::make_shared<SimpleSPCGSolverParameters>();
+    LevenbergMarquardtOptimizer optimizer(graph, initialEstimate, param);
+    Values result = optimizer.optimize();
+    result.print("\nFinal result:\n");
+    cout << "simple spcg solver final error = " << graph.error(result) << endl;
+  }
+
+  {
+    param.iterativeParams = boost::make_shared<SubgraphSolverParameters>();
+    LevenbergMarquardtOptimizer optimizer(graph, initialEstimate, param);
+    Values result = optimizer.optimize();
+    result.print("\nFinal result:\n");
+    cout << "subgraph solver final error = " << graph.error(result) << endl;
+  }
 
   return 0 ;
 }
diff --git a/gtsam/linear/ConjugateGradientSolver.h b/gtsam/linear/ConjugateGradientSolver.h
index 2cfd1279f..f7f1ad5dc 100644
--- a/gtsam/linear/ConjugateGradientSolver.h
+++ b/gtsam/linear/ConjugateGradientSolver.h
@@ -36,16 +36,14 @@ struct ConjugateGradientParameters : public IterativeOptimizationParameters {
     SBM_MT ///< Sparse Block Matrix Multithreaded
   } blas_kernel_;
 
-  enum Verbosity { SILENT = 0, COMPLEXITY = 1, ERROR = 2} verbosity_ ; /* Verbosity */
-
   ConjugateGradientParameters()
   : minIterations_(1), maxIterations_(500), reset_(501), epsilon_rel_(1e-3), epsilon_abs_(1e-3),
-    blas_kernel_(GTSAM), verbosity_(SILENT) {}
+    blas_kernel_(GTSAM) {}
 
   ConjugateGradientParameters(size_t minIterations, size_t maxIterations, size_t reset,
-    double epsilon_rel, double epsilon_abs, BLASKernel blas = GTSAM, Verbosity verbosity = SILENT)
+    double epsilon_rel, double epsilon_abs, BLASKernel blas = GTSAM)
   : minIterations_(minIterations), maxIterations_(maxIterations), reset_(reset),
-    epsilon_rel_(epsilon_rel), epsilon_abs_(epsilon_abs), blas_kernel_(blas), verbosity_(verbosity) {}
+    epsilon_rel_(epsilon_rel), epsilon_abs_(epsilon_abs), blas_kernel_(blas) {}
 
   /* general interface */
   inline size_t minIterations() const { return minIterations_; }
@@ -55,7 +53,6 @@ struct ConjugateGradientParameters : public IterativeOptimizationParameters {
   inline double epsilon_rel() const { return epsilon_rel_; }
   inline double epsilon_abs() const { return epsilon_abs_; }
   inline BLASKernel blas_kernel() const { return blas_kernel_; }
-  inline Verbosity verbosity() const { return verbosity_; }
 
   void print() const {
     const std::string blasStr[3] = {"gtsam", "sbm", "sbm-mt"};
@@ -67,7 +64,6 @@ struct ConjugateGradientParameters : public IterativeOptimizationParameters {
               << ", resetIter = " << reset_
               << ", eps_rel = " << epsilon_rel_
               << ", eps_abs = " << epsilon_abs_
-              << ", verbosity = " << verbosity_
               << std::endl;
   }
 };
diff --git a/gtsam/linear/Errors.cpp b/gtsam/linear/Errors.cpp
index d3007350f..65a52cce1 100644
--- a/gtsam/linear/Errors.cpp
+++ b/gtsam/linear/Errors.cpp
@@ -86,6 +86,16 @@ Errors Errors::operator-(const Errors& b) const {
   return result;
 }
 
+/* ************************************************************************* */
+Errors Errors::operator-() const {
+  Errors result;
+  BOOST_FOREACH(const Vector& ai, *this)
+    result.push_back(-ai);
+  return result;
+}
+
+
+
 /* ************************************************************************* */
 double dot(const Errors& a, const Errors& b) {
 #ifndef NDEBUG
diff --git a/gtsam/linear/Errors.h b/gtsam/linear/Errors.h
index b51bf606d..8e39e40ff 100644
--- a/gtsam/linear/Errors.h
+++ b/gtsam/linear/Errors.h
@@ -45,6 +45,8 @@ namespace gtsam {
     /** subtraction */
     Errors operator-(const Errors& b) const;
 
+    /** negation */
+    Errors operator-() const ;
+
   }; // Errors
diff --git a/gtsam/linear/GaussianBayesNet.cpp b/gtsam/linear/GaussianBayesNet.cpp
index 056bda67e..62b63bb27 100644
--- a/gtsam/linear/GaussianBayesNet.cpp
+++ b/gtsam/linear/GaussianBayesNet.cpp
@@ -99,6 +99,24 @@ void optimizeInPlace(const GaussianBayesNet& bn, VectorValues& x) {
   }
 }
 
+/* ************************************************************************* */
+VectorValues backSubstitute(const GaussianBayesNet& bn, const VectorValues& input) {
+  VectorValues output = input;
+  BOOST_REVERSE_FOREACH(const boost::shared_ptr<const GaussianConditional> cg, bn) {
+    const Index key = *(cg->beginFrontals());
+    Vector xS = internal::extractVectorValuesSlices(output, cg->beginParents(), cg->endParents());
+    xS = input[key] - cg->get_S() * xS;
+    output[key] = cg->get_R().triangularView<Eigen::Upper>().solve(xS);
+  }
+
+  BOOST_FOREACH(const boost::shared_ptr<const GaussianConditional> cg, bn) {
+    cg->scaleFrontalsBySigma(output);
+  }
+
+  return output;
+}
+
 /* ************************************************************************* */
 // gy=inv(L)*gx by solving L*gy=gx.
 // gy=inv(R'*inv(Sigma))*gx
diff --git a/gtsam/linear/GaussianBayesNet.h b/gtsam/linear/GaussianBayesNet.h
index 8c6e72357..fb179da92 100644
--- a/gtsam/linear/GaussianBayesNet.h
+++ b/gtsam/linear/GaussianBayesNet.h
@@ -106,6 +106,12 @@ namespace gtsam {
    */
   void optimizeGradientSearchInPlace(const GaussianBayesNet& bn, VectorValues& grad);
 
+  /**
+   * Backsubstitute
+   * gy=inv(R*inv(Sigma))*gx
+   */
+  VectorValues backSubstitute(const GaussianBayesNet& bn, const VectorValues& gx);
+
   /**
    * Transpose Backsubstitute
    * gy=inv(L)*gx by solving L*gy=gx.
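
Note: the new backSubstitute is the forward counterpart of the existing
backSubstituteTranspose. In the notation of the header comments above (R is
the upper-triangular factor stored in the Bayes net, Sigma the noise sigmas),
the pair provides both triangular solves needed by the subgraph
preconditioner further down in this patch:

    backSubstitute:          gy = inv(R*inv(Sigma))*gx
                             (solve R*v = gx from the last conditional upward,
                              then rescale the frontal variables by sigma)
    backSubstituteTranspose: gy = inv(L)*gx, with L = R'*inv(Sigma)

SubgraphPreconditioner::x() uses the first to evaluate x = xbar + inv(R1)*y,
and gradient() uses both, as the SubgraphPreconditioner.cpp hunks below show.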
diff --git a/gtsam/linear/IterativeSolver.h b/gtsam/linear/IterativeSolver.h
index de1c564ef..fb19e6d58 100644
--- a/gtsam/linear/IterativeSolver.h
+++ b/gtsam/linear/IterativeSolver.h
@@ -23,8 +23,7 @@ namespace gtsam {
   public:
 
     typedef boost::shared_ptr<IterativeOptimizationParameters> shared_ptr;
-
-    enum Kernel { PCG = 0, LSPCG = 1 } kernel_ ; ///< Iterative Method Kernel
+    enum Kernel { CG = 0 } kernel_ ; ///< Iterative Method Kernel
     enum Verbosity { SILENT = 0, COMPLEXITY = 1, ERROR = 2} verbosity_ ; ///< Verbosity
 
   public:
 
     IterativeOptimizationParameters(const IterativeOptimizationParameters &p)
       : kernel_(p.kernel_), verbosity_(p.verbosity_) {}
 
-    IterativeOptimizationParameters(Kernel kernel = LSPCG, Verbosity verbosity = SILENT)
+    IterativeOptimizationParameters(Kernel kernel = CG, Verbosity verbosity = SILENT)
       : kernel_(kernel), verbosity_(verbosity) {}
 
     virtual ~IterativeOptimizationParameters() {}
@@ -42,7 +41,7 @@ namespace gtsam {
     inline Verbosity verbosity() const { return verbosity_; }
 
     void print() const {
-      const std::string kernelStr[2] = {"pcg", "lspcg"};
+      const std::string kernelStr[1] = {"cg"};
       std::cout << "IterativeOptimizationParameters: "
                 << "kernel = " << kernelStr[kernel_]
                 << ", verbosity = " << verbosity_ << std::endl;
diff --git a/gtsam/linear/SimpleSPCGSolver.cpp b/gtsam/linear/SimpleSPCGSolver.cpp
index 03401cc7f..addbc74dd 100644
--- a/gtsam/linear/SimpleSPCGSolver.cpp
+++ b/gtsam/linear/SimpleSPCGSolver.cpp
@@ -96,9 +96,7 @@ VectorValues::shared_ptr SimpleSPCGSolver::optimize (const VectorValues &initial
   double gamma = s.vector().squaredNorm(), new_gamma = 0.0, alpha = 0.0, beta = 0.0 ;
 
-  const double threshold =
-      std::max(parameters_.epsilon_abs(),
-               parameters_.epsilon() * parameters_.epsilon() * gamma);
+  const double threshold = std::max(parameters_.epsilon_abs(), parameters_.epsilon() * parameters_.epsilon() * gamma);
 
   const size_t iMaxIterations = parameters_.maxIterations();
   const Parameters::Verbosity verbosity = parameters_.verbosity();
@@ -217,10 +215,6 @@ SimpleSPCGSolver::splitGraph(const GaussianFactorGraph &gfg) {
     else Ac->push_back(boost::dynamic_pointer_cast<JacobianFactor>(gf));
   }
 
-//  gfg.print("gfg");
-//  At->print("At");
-//  Ac->print("Ac");
-
   return boost::tie(At, Ac);
 }
diff --git a/gtsam/linear/SimpleSPCGSolver.h b/gtsam/linear/SimpleSPCGSolver.h
index 406232d3c..097022c22 100644
--- a/gtsam/linear/SimpleSPCGSolver.h
+++ b/gtsam/linear/SimpleSPCGSolver.h
@@ -19,6 +19,11 @@ namespace gtsam {
 
+struct SimpleSPCGSolverParameters : public ConjugateGradientParameters {
+  typedef ConjugateGradientParameters Base;
+  SimpleSPCGSolverParameters() : Base() {}
+};
+
 /**
  * This class gives a simple implementation to the SPCG solver presented in Dellaert et al in IROS'10.
  *
@@ -40,7 +45,7 @@ class SimpleSPCGSolver : public IterativeSolver {
 
 public:
 
   typedef IterativeSolver Base;
-  typedef ConjugateGradientParameters Parameters;
+  typedef SimpleSPCGSolverParameters Parameters;
   typedef boost::shared_ptr<SimpleSPCGSolver> shared_ptr;
 
 protected:
diff --git a/gtsam/linear/SubgraphPreconditioner.cpp b/gtsam/linear/SubgraphPreconditioner.cpp
index 59a72dd8b..91a98a263 100644
--- a/gtsam/linear/SubgraphPreconditioner.cpp
+++ b/gtsam/linear/SubgraphPreconditioner.cpp
@@ -28,27 +28,17 @@ namespace gtsam {
 
  /* ************************************************************************* */
  SubgraphPreconditioner::SubgraphPreconditioner(const sharedFG& Ab1, const sharedFG& Ab2,
    const sharedBayesNet& Rc1, const sharedValues& xbar) :
-    Ab1_(Ab1), Ab2_(Ab2), Rc1_(Rc1), xbar_(xbar), b2bar_(gaussianErrors_(*Ab2_,*xbar)) {
+    Ab1_(Ab1), Ab2_(Ab2), Rc1_(Rc1), xbar_(xbar), b2bar_(new Errors(-gaussianErrors(*Ab2_,*xbar))) {
  }
 
  /* ************************************************************************* */
  // x = xbar + inv(R1)*y
  VectorValues SubgraphPreconditioner::x(const VectorValues& y) const {
-   VectorValues x = y;
-   optimizeInPlace(*Rc1_,x);
-   x += *xbar_;
-   return x;
+   return *xbar_ + gtsam::backSubstitute(*Rc1_, y);
  }
 
-// SubgraphPreconditioner SubgraphPreconditioner::add_priors(double sigma) const {
-//   SubgraphPreconditioner result = *this ;
-//   result.Ab2_ = sharedFG(new GaussianFactorGraph(Ab2_->add_priors(sigma))) ;
-//   return result ;
-// }
-
  /* ************************************************************************* */
  double error(const SubgraphPreconditioner& sp, const VectorValues& y) {
-   Errors e(y);
    VectorValues x = sp.x(y);
    Errors e2 = gaussianErrors(*sp.Ab2(),x);
@@ -58,12 +48,11 @@ namespace gtsam {
  /* ************************************************************************* */
  // gradient is y + inv(R1')*A2'*(A2*inv(R1)*y-b2bar),
  VectorValues gradient(const SubgraphPreconditioner& sp, const VectorValues& y) {
-   VectorValues x = sp.x(y); // x = inv(R1)*y
-   Errors e2 = gaussianErrors(*sp.Ab2(),x);
-   VectorValues gx2 = VectorValues::Zero(y);
-   gtsam::transposeMultiplyAdd(*sp.Ab2(),1.0,e2,gx2); // A2'*e2;
-   VectorValues gy2 = gtsam::backSubstituteTranspose(*sp.Rc1(), gx2); // inv(R1')*gx2
-   return y + gy2;
+   VectorValues x = gtsam::backSubstitute(*sp.Rc1(), y); /* inv(R1)*y */
+   Errors e = (*sp.Ab2()*x - *sp.b2bar());               /* (A2*inv(R1)*y-b2bar) */
+   VectorValues v = VectorValues::Zero(x);
+   transposeMultiplyAdd(*sp.Ab2(), 1.0, e, v);           /* A2'*(A2*inv(R1)*y-b2bar) */
+   return y + gtsam::backSubstituteTranspose(*sp.Rc1(), v);
  }
 
  /* ************************************************************************* */
@@ -71,13 +60,9 @@ namespace gtsam {
  Errors operator*(const SubgraphPreconditioner& sp, const VectorValues& y) {
 
    Errors e(y);
-
-   // Add A2 contribution
-   VectorValues x = y; // TODO avoid ?
-   gtsam::optimizeInPlace(*sp.Rc1(), x); // x=inv(R1)*y
-   Errors e2 = *sp.Ab2() * x; // A2*x
+   VectorValues x = gtsam::backSubstitute(*sp.Rc1(), y); /* x=inv(R1)*y */
+   Errors e2 = *sp.Ab2() * x;                            /* A2*x */
    e.splice(e.end(), e2);
-
    return e;
  }
 
  /* ************************************************************************* */
  // In-place version that overwrites e
  void multiplyInPlace(const SubgraphPreconditioner& sp, const VectorValues& y, Errors& e) {
-
    Errors::iterator ei = e.begin();
    for ( Index i = 0 ; i < y.size() ; ++i, ++ei ) {
      *ei = y[i];
    }
 
    // Add A2 contribution
-   VectorValues x = y; // TODO avoid ?
-   gtsam::optimizeInPlace(*sp.Rc1(), x); // x=inv(R1)*y
-   gtsam::multiplyInPlace(*sp.Ab2(),x,ei); // use iterator version
+   VectorValues x = gtsam::backSubstitute(*sp.Rc1(), y); // x=inv(R1)*y
+   gtsam::multiplyInPlace(*sp.Ab2(), x, ei);             // use iterator version
  }
 
  /* ************************************************************************* */
@@ -114,13 +97,12 @@ namespace gtsam {
  void transposeMultiplyAdd
    (const SubgraphPreconditioner& sp, double alpha, const Errors& e, VectorValues& y) {
-
    Errors::const_iterator it = e.begin();
    for ( Index i = 0 ; i < y.size() ; ++i, ++it ) {
      const Vector& ei = *it;
-     axpy(alpha,ei,y[i]);
+     axpy(alpha, ei, y[i]);
    }
-   sp.transposeMultiplyAdd2(alpha,it,e.end(),y);
+   sp.transposeMultiplyAdd2(alpha, it, e.end(), y);
  }
 
  /* ************************************************************************* */
diff --git a/gtsam/linear/SubgraphPreconditioner.h b/gtsam/linear/SubgraphPreconditioner.h
index 460f28e5a..7b59735d9 100644
--- a/gtsam/linear/SubgraphPreconditioner.h
+++ b/gtsam/linear/SubgraphPreconditioner.h
@@ -42,8 +42,8 @@ namespace gtsam {
  private:
    sharedFG Ab1_, Ab2_;
    sharedBayesNet Rc1_;
-   sharedValues xbar_;
-   sharedErrors b2bar_; /** b2 - A2*xbar */
+   sharedValues xbar_;  ///< A1 \ b1
+   sharedErrors b2bar_; ///< A2*xbar - b2
 
  public:
@@ -66,6 +66,9 @@ namespace gtsam {
    /** Access Rc1 */
    const sharedBayesNet& Rc1() const { return Rc1_; }
 
+   /** Access b2bar */
+   const sharedErrors b2bar() const { return b2bar_; }
+
    /**
     * Add zero-mean i.i.d. Gaussian prior terms to each variable
     * @param sigma Standard deviation of Gaussian
diff --git a/gtsam/linear/SubgraphSolver.cpp b/gtsam/linear/SubgraphSolver.cpp
index 5d6237d9c..c91365f19 100644
--- a/gtsam/linear/SubgraphSolver.cpp
+++ b/gtsam/linear/SubgraphSolver.cpp
@@ -61,9 +61,7 @@ SubgraphSolver::SubgraphSolver(const GaussianFactorGraph &gfg, const Parameters
 
 VectorValues::shared_ptr SubgraphSolver::optimize() {
 
-  // preconditioned conjugate gradient
-  VectorValues zeros = pc_->zero();
-  VectorValues ybar = conjugateGradients<SubgraphPreconditioner, VectorValues, Errors> (*pc_, zeros, parameters_);
+  VectorValues ybar = conjugateGradients<SubgraphPreconditioner, VectorValues, Errors>(*pc_, pc_->zero(), parameters_);
 
   boost::shared_ptr<VectorValues> xbar = boost::make_shared<VectorValues>() ;
   *xbar = pc_->x(ybar);
diff --git a/gtsam/linear/SubgraphSolver.h b/gtsam/linear/SubgraphSolver.h
index 111f85cad..ec9a5f19a 100644
--- a/gtsam/linear/SubgraphSolver.h
+++ b/gtsam/linear/SubgraphSolver.h
@@ -19,6 +19,11 @@ namespace gtsam {
 
+struct SubgraphSolverParameters : public ConjugateGradientParameters {
+  typedef ConjugateGradientParameters Base;
+  SubgraphSolverParameters() : Base() {}
+};
+
 /**
  * A linear system solver using subgraph preconditioning conjugate gradient
  */
@@ -27,18 +32,17 @@ class SubgraphSolver : public IterativeSolver {
 
 public:
 
-  typedef ConjugateGradientParameters Parameters;
+  typedef SubgraphSolverParameters Parameters;
+
+protected:
 
   Parameters parameters_;
-
   SubgraphPreconditioner::shared_ptr pc_; ///< preconditioner object
 
 public:
 
   SubgraphSolver(const GaussianFactorGraph &gfg, const Parameters &parameters);
-
   virtual ~SubgraphSolver() {}
-
   virtual VectorValues::shared_ptr optimize () ;
 
 protected:
diff --git a/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp b/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp
index 8fbedc614..ddebdb498 100644
--- a/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp
+++ b/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp
@@ -76,15 +76,19 @@ void LevenbergMarquardtOptimizer::iterate() {
   }
   else if ( params_.isCG() ) {
 
-    ConjugateGradientParameters::shared_ptr params (!params_.iterativeParams ?
-        boost::make_shared<ConjugateGradientParameters>() :
-        boost::dynamic_pointer_cast<ConjugateGradientParameters>(params_.iterativeParams));
+    if ( !params_.iterativeParams ) throw runtime_error("LMSolver: cg parameter has to be assigned ...");
 
-    if ( !params ) throw runtime_error("LMSolver: spcg parameter dynamic casting failed");
-
-    SimpleSPCGSolver solver(dampedSystem, *params);
-    //SubgraphSolver solver(dampedSystem, *params);
-    delta = *solver.optimize();
+    if ( boost::dynamic_pointer_cast<SimpleSPCGSolverParameters>(params_.iterativeParams)) {
+      SimpleSPCGSolver solver (dampedSystem, *boost::dynamic_pointer_cast<SimpleSPCGSolverParameters>(params_.iterativeParams));
+      delta = *solver.optimize();
+    }
+    else if ( boost::dynamic_pointer_cast<SubgraphSolverParameters>(params_.iterativeParams) ) {
+      SubgraphSolver solver (dampedSystem, *boost::dynamic_pointer_cast<SubgraphSolverParameters>(params_.iterativeParams));
+      delta = *solver.optimize();
+    }
+    else {
+      throw runtime_error("LMSolver: special cg parameter type is not handled in LM solver ...");
+    }
   }
   else {
     throw runtime_error("Optimization parameter is invalid: LevenbergMarquardtParams::elimination");
diff --git a/gtsam_unstable/linear/iterative-inl.h b/gtsam_unstable/linear/iterative-inl.h
index 5641cb7ae..f384b7504 100644
--- a/gtsam_unstable/linear/iterative-inl.h
+++ b/gtsam_unstable/linear/iterative-inl.h
@@ -118,13 +118,10 @@ namespace gtsam {
   // conjugate gradient method.
   // S: linear system, V: step vector, E: errors
   template<class S, class V, class E>
-  V conjugateGradients(
-      const S& Ab,
-      V x,
-      const ConjugateGradientParameters &parameters,
-      bool steepest = false) {
+  V conjugateGradients(const S& Ab, V x, const ConjugateGradientParameters &parameters, bool steepest = false) {
 
     CGState<S, V, E> state(Ab, x, parameters, steepest);
+
     if (parameters.verbosity() != ConjugateGradientParameters::SILENT)
       std::cout << "CG: epsilon = " << parameters.epsilon()
                 << ", maxIterations = " << parameters.maxIterations()
diff --git a/gtsam_unstable/linear/iterative.h b/gtsam_unstable/linear/iterative.h
index f578391d0..6765ac22a 100644
--- a/gtsam_unstable/linear/iterative.h
+++ b/gtsam_unstable/linear/iterative.h
@@ -44,6 +44,7 @@ namespace gtsam {
    * Needed to run Conjugate Gradients on matrices
    */
   class System {
+  private:
     const Matrix& A_;
     const Vector& b_;
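
Usage sketch for the new solver selection (distilled from the
examples/Pose2SLAMwSPCG.cpp hunk above; `graph`, `initialEstimate`, and the
free-function name are illustrative placeholders, not part of the patch):

    #include <boost/make_shared.hpp>
    #include <gtsam/linear/SubgraphSolver.h>
    #include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>

    using namespace gtsam;

    Values solveWithSubgraphCG(const NonlinearFactorGraph& graph,
                               const Values& initialEstimate) {
      LevenbergMarquardtParams param;
      // Route LM through the iterative (conjugate gradient) linear solver branch.
      param.linearSolverType = SuccessiveLinearizationParams::CG;
      // iterate() now dispatches on the dynamic type of iterativeParams:
      //   SimpleSPCGSolverParameters -> SimpleSPCGSolver
      //   SubgraphSolverParameters   -> SubgraphSolver
      param.iterativeParams = boost::make_shared<SubgraphSolverParameters>();
      LevenbergMarquardtOptimizer optimizer(graph, initialEstimate, param);
      return optimizer.optimize();
    }

Note the behavioral change in LevenbergMarquardtOptimizer::iterate():
iterativeParams is no longer default-constructed when unset; the optimizer
now throws unless it is one of the two concrete parameter types above.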