old spcg solver fixed
parent 9fdb28f9bf
commit 734a18b02e
@@ -16,6 +16,8 @@
  * @date June 2, 2012
  */

+#include <gtsam/linear/SimpleSPCGSolver.h>
+#include <gtsam/linear/SubgraphSolver.h>
 #include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
 #include <gtsam/slam/pose2SLAM.h>
@@ -61,10 +63,26 @@ int main(void) {
   cout << "initial error = " << graph.error(initialEstimate) << endl ;

   // 4. Single Step Optimization using Levenberg-Marquardt
   // Note: Although there are many options in IterativeOptimizationParameters,
   Values result = graph.optimizeSPCG(initialEstimate);
   result.print("\nFinal result:\n");
   cout << "final error = " << graph.error(result) << endl;
+  LevenbergMarquardtParams param;
+  param.verbosity = NonlinearOptimizerParams::ERROR;
+  param.verbosityLM = LevenbergMarquardtParams::LAMBDA;
+  param.linearSolverType = SuccessiveLinearizationParams::CG;
+
+  {
+    param.iterativeParams = boost::make_shared<SimpleSPCGSolverParameters>();
+    LevenbergMarquardtOptimizer optimizer(graph, initialEstimate, param);
+    Values result = optimizer.optimize();
+    result.print("\nFinal result:\n");
+    cout << "simple spcg solver final error = " << graph.error(result) << endl;
+  }
+
+  {
+    param.iterativeParams = boost::make_shared<SubgraphSolverParameters>();
+    LevenbergMarquardtOptimizer optimizer(graph, initialEstimate, param);
+    Values result = optimizer.optimize();
+    result.print("\nFinal result:\n");
+    cout << "subgraph solver final error = " << graph.error(result) << endl;
+  }
+
   return 0 ;
 }
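The two blocks above select a backend purely through the dynamic type of param.iterativeParams; LevenbergMarquardtOptimizer::iterate (hunk further below) dispatches on it. A condensed sketch of that selection pattern, assuming the types introduced in this commit, with dampedSystem standing in for the damped linear system the optimizer builds internally:

    // Sketch only: dispatch on the dynamic type of the iterative parameters.
    if (boost::dynamic_pointer_cast<SimpleSPCGSolverParameters>(param.iterativeParams)) {
      SimpleSPCGSolver solver(dampedSystem,
          *boost::dynamic_pointer_cast<SimpleSPCGSolverParameters>(param.iterativeParams));
      delta = *solver.optimize();   // simple SPCG backend
    } else if (boost::dynamic_pointer_cast<SubgraphSolverParameters>(param.iterativeParams)) {
      SubgraphSolver solver(dampedSystem,
          *boost::dynamic_pointer_cast<SubgraphSolverParameters>(param.iterativeParams));
      delta = *solver.optimize();   // subgraph PCG backend
    }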
@@ -36,16 +36,14 @@ struct ConjugateGradientParameters : public IterativeOptimizationParameters {
     SBM_MT ///< Sparse Block Matrix Multithreaded
   } blas_kernel_;

-  enum Verbosity { SILENT = 0, COMPLEXITY = 1, ERROR = 2} verbosity_ ; /* Verbosity */

   ConjugateGradientParameters()
   : minIterations_(1), maxIterations_(500), reset_(501), epsilon_rel_(1e-3), epsilon_abs_(1e-3),
-    blas_kernel_(GTSAM), verbosity_(SILENT) {}
+    blas_kernel_(GTSAM) {}

   ConjugateGradientParameters(size_t minIterations, size_t maxIterations, size_t reset,
-    double epsilon_rel, double epsilon_abs, BLASKernel blas = GTSAM, Verbosity verbosity = SILENT)
+    double epsilon_rel, double epsilon_abs, BLASKernel blas = GTSAM)
   : minIterations_(minIterations), maxIterations_(maxIterations), reset_(reset),
-    epsilon_rel_(epsilon_rel), epsilon_abs_(epsilon_abs), blas_kernel_(blas), verbosity_(verbosity) {}
+    epsilon_rel_(epsilon_rel), epsilon_abs_(epsilon_abs), blas_kernel_(blas) {}

   /* general interface */
   inline size_t minIterations() const { return minIterations_; }
@@ -55,7 +53,6 @@ struct ConjugateGradientParameters : public IterativeOptimizationParameters {
   inline double epsilon_rel() const { return epsilon_rel_; }
   inline double epsilon_abs() const { return epsilon_abs_; }
   inline BLASKernel blas_kernel() const { return blas_kernel_; }
-  inline Verbosity verbosity() const { return verbosity_; }

   void print() const {
     const std::string blasStr[3] = {"gtsam", "sbm", "sbm-mt"};
@@ -67,7 +64,6 @@ struct ConjugateGradientParameters : public IterativeOptimizationParameters {
       << ", resetIter = " << reset_
       << ", eps_rel = " << epsilon_rel_
       << ", eps_abs = " << epsilon_abs_
-      << ", verbosity = " << verbosity_
       << std::endl;
   }
 };
@@ -86,6 +86,16 @@ Errors Errors::operator-(const Errors& b) const {
   return result;
 }

+/* ************************************************************************* */
+Errors Errors::operator-() const {
+  Errors result;
+  BOOST_FOREACH(const Vector& ai, *this)
+    result.push_back(-ai);
+  return result;
+}
+
+
 /* ************************************************************************* */
 double dot(const Errors& a, const Errors& b) {
 #ifndef NDEBUG
@@ -45,6 +45,8 @@ namespace gtsam {
     /** subtraction */
     Errors operator-(const Errors& b) const;

+    /** negation */
+    Errors operator-() const ;
+
   }; // Errors
@@ -99,6 +99,24 @@ void optimizeInPlace(const GaussianBayesNet& bn, VectorValues& x) {
   }
 }

+/* ************************************************************************* */
+VectorValues backSubstitute(const GaussianBayesNet& bn, const VectorValues& input) {
+  VectorValues output = input;
+  BOOST_REVERSE_FOREACH(const boost::shared_ptr<const GaussianConditional> cg, bn) {
+    const Index key = *(cg->beginFrontals());
+    Vector xS = internal::extractVectorValuesSlices(output, cg->beginParents(), cg->endParents());
+    xS = input[key] - cg->get_S() * xS;
+    output[key] = cg->get_R().triangularView<Eigen::Upper>().solve(xS);
+  }
+
+  BOOST_FOREACH(const boost::shared_ptr<const GaussianConditional> cg, bn) {
+    cg->scaleFrontalsBySigma(output);
+  }
+
+  return output;
+}
+
+
 /* ************************************************************************* */
 // gy=inv(L)*gx by solving L*gy=gx.
 // gy=inv(R'*inv(Sigma))*gx
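The backSubstitute added above solves the upper-triangular system one conditional at a time, sweeping the Bayes net in reverse elimination order. Per conditional, with frontal matrix R, parent matrix S, and right-hand-side slice d = input[key], the update computed by the loop is

\[
x_f \;=\; R^{-1}\bigl(d - S\,x_S\bigr),
\]

after which a forward sweep rescales each frontal block by its sigmas, matching the header comment gy=inv(R*inv(Sigma))*gx.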
@@ -106,6 +106,12 @@ namespace gtsam {
    * */
   void optimizeGradientSearchInPlace(const GaussianBayesNet& bn, VectorValues& grad);

+  /**
+   * Backsubstitute
+   * gy=inv(R*inv(Sigma))*gx
+   */
+  VectorValues backSubstitute(const GaussianBayesNet& bn, const VectorValues& gx);
+
   /**
    * Transpose Backsubstitute
    * gy=inv(L)*gx by solving L*gy=gx.
@@ -23,8 +23,7 @@ namespace gtsam {
   public:

     typedef boost::shared_ptr<IterativeOptimizationParameters> shared_ptr;

-    enum Kernel { PCG = 0, LSPCG = 1 } kernel_ ; ///< Iterative Method Kernel
+    enum Kernel { CG = 0 } kernel_ ; ///< Iterative Method Kernel
     enum Verbosity { SILENT = 0, COMPLEXITY = 1, ERROR = 2} verbosity_ ; ///< Verbosity

   public:
@@ -32,7 +31,7 @@ namespace gtsam {
     IterativeOptimizationParameters(const IterativeOptimizationParameters &p)
       : kernel_(p.kernel_), verbosity_(p.verbosity_) {}

-    IterativeOptimizationParameters(Kernel kernel = LSPCG, Verbosity verbosity = SILENT)
+    IterativeOptimizationParameters(Kernel kernel = CG, Verbosity verbosity = SILENT)
       : kernel_(kernel), verbosity_(verbosity) {}

     virtual ~IterativeOptimizationParameters() {}
@@ -42,7 +41,7 @@ namespace gtsam {
     inline Verbosity verbosity() const { return verbosity_; }

     void print() const {
-      const std::string kernelStr[2] = {"pcg", "lspcg"};
+      const std::string kernelStr[1] = {"cg"};
       std::cout << "IterativeOptimizationParameters: "
         << "kernel = " << kernelStr[kernel_]
         << ", verbosity = " << verbosity_ << std::endl;
@@ -96,9 +96,7 @@ VectorValues::shared_ptr SimpleSPCGSolver::optimize (const VectorValues &initial

   double gamma = s.vector().squaredNorm(), new_gamma = 0.0, alpha = 0.0, beta = 0.0 ;

-  const double threshold =
-      std::max(parameters_.epsilon_abs(),
-               parameters_.epsilon() * parameters_.epsilon() * gamma);
+  const double threshold = std::max(parameters_.epsilon_abs(), parameters_.epsilon() * parameters_.epsilon() * gamma);
   const size_t iMaxIterations = parameters_.maxIterations();
   const Parameters::Verbosity verbosity = parameters_.verbosity();
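The collapsed expression keeps the usual relative/absolute stopping rule: with gamma holding the initial squared residual norm, PCG terminates once the current squared residual drops below

\[
\max\bigl(\epsilon_{\mathrm{abs}},\; \epsilon^{2}\,\gamma_{0}\bigr),
\]

where epsilon() is assumed to return the relative tolerance epsilon_rel_ from ConjugateGradientParameters.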
@@ -217,10 +215,6 @@ SimpleSPCGSolver::splitGraph(const GaussianFactorGraph &gfg) {
     else Ac->push_back(boost::dynamic_pointer_cast<JacobianFactor>(gf));
   }

-  // gfg.print("gfg");
-  // At->print("At");
-  // Ac->print("Ac");
-
   return boost::tie(At, Ac);
 }
@@ -19,6 +19,11 @@

 namespace gtsam {

+struct SimpleSPCGSolverParameters : public ConjugateGradientParameters {
+  typedef ConjugateGradientParameters Base;
+  SimpleSPCGSolverParameters() : Base() {}
+};
+
 /**
  * This class gives a simple implementation to the SPCG solver presented in Dellaert et al in IROS'10.
  *
@@ -40,7 +45,7 @@ class SimpleSPCGSolver : public IterativeSolver {
 public:

   typedef IterativeSolver Base;
-  typedef ConjugateGradientParameters Parameters;
+  typedef SimpleSPCGSolverParameters Parameters;
   typedef boost::shared_ptr<IterativeSolver> shared_ptr;

 protected:
@@ -28,27 +28,17 @@ namespace gtsam {
   /* ************************************************************************* */
   SubgraphPreconditioner::SubgraphPreconditioner(const sharedFG& Ab1, const sharedFG& Ab2,
     const sharedBayesNet& Rc1, const sharedValues& xbar) :
-    Ab1_(Ab1), Ab2_(Ab2), Rc1_(Rc1), xbar_(xbar), b2bar_(gaussianErrors_(*Ab2_,*xbar)) {
+    Ab1_(Ab1), Ab2_(Ab2), Rc1_(Rc1), xbar_(xbar), b2bar_(new Errors(-gaussianErrors(*Ab2_,*xbar))) {
   }

   /* ************************************************************************* */
   // x = xbar + inv(R1)*y
   VectorValues SubgraphPreconditioner::x(const VectorValues& y) const {
-    VectorValues x = y;
-    optimizeInPlace(*Rc1_,x);
-    x += *xbar_;
-    return x;
+    return *xbar_ + gtsam::backSubstitute(*Rc1_, y);
   }

-  // SubgraphPreconditioner SubgraphPreconditioner::add_priors(double sigma) const {
-  //   SubgraphPreconditioner result = *this ;
-  //   result.Ab2_ = sharedFG(new GaussianFactorGraph(Ab2_->add_priors(sigma))) ;
-  //   return result ;
-  // }
-
   /* ************************************************************************* */
   double error(const SubgraphPreconditioner& sp, const VectorValues& y) {

     Errors e(y);
     VectorValues x = sp.x(y);
     Errors e2 = gaussianErrors(*sp.Ab2(),x);
@@ -58,12 +48,11 @@ namespace gtsam {
   /* ************************************************************************* */
   // gradient is y + inv(R1')*A2'*(A2*inv(R1)*y-b2bar),
   VectorValues gradient(const SubgraphPreconditioner& sp, const VectorValues& y) {
-    VectorValues x = sp.x(y); // x = inv(R1)*y
-    Errors e2 = gaussianErrors(*sp.Ab2(),x);
-    VectorValues gx2 = VectorValues::Zero(y);
-    gtsam::transposeMultiplyAdd(*sp.Ab2(),1.0,e2,gx2); // A2'*e2;
-    VectorValues gy2 = gtsam::backSubstituteTranspose(*sp.Rc1(), gx2); // inv(R1')*gx2
-    return y + gy2;
+    VectorValues x = gtsam::backSubstitute(*sp.Rc1(), y); /* inv(R1)*y */
+    Errors e = (*sp.Ab2()*x - *sp.b2bar()); /* (A2*inv(R1)*y-b2bar) */
+    VectorValues v = VectorValues::Zero(x);
+    transposeMultiplyAdd(*sp.Ab2(), 1.0, e, v); /* A2'*(A2*inv(R1)*y-b2bar) */
+    return y + gtsam::backSubstituteTranspose(*sp.Rc1(), v);
   }

   /* ************************************************************************* */
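The rewritten body computes exactly the gradient stated in the comment. Writing x = R_1^{-1} y, the preconditioned objective and its gradient are, as a sketch from the surrounding definitions:

\[
f(y) = \tfrac{1}{2}\lVert y\rVert^{2} + \tfrac{1}{2}\bigl\lVert A_2 R_1^{-1} y - \bar b_2\bigr\rVert^{2},
\qquad
\nabla f(y) = y + R_1^{-T} A_2^{T}\bigl(A_2 R_1^{-1} y - \bar b_2\bigr),
\]

where the final transpose back-substitution supplies the R_1^{-T} application.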
@@ -71,13 +60,9 @@ namespace gtsam {
   Errors operator*(const SubgraphPreconditioner& sp, const VectorValues& y) {

     Errors e(y);

     // Add A2 contribution
-    VectorValues x = y; // TODO avoid ?
-    gtsam::optimizeInPlace(*sp.Rc1(), x); // x=inv(R1)*y
-    Errors e2 = *sp.Ab2() * x; // A2*x
+    VectorValues x = gtsam::backSubstitute(*sp.Rc1(), y); /* x=inv(R1)*y */
+    Errors e2 = *sp.Ab2() * x; /* A2*x */
     e.splice(e.end(), e2);

     return e;
   }
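operator* applies the preconditioned coefficient matrix: under the change of variables the R_1 block reduces to the identity, so the first part of the returned Errors is y itself and the second is the A_2 contribution:

\[
\hat{A}\,y = \begin{bmatrix} I \\ A_2 R_1^{-1} \end{bmatrix} y
 = \begin{bmatrix} y \\ A_2 R_1^{-1} y \end{bmatrix}.
\]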
@@ -85,16 +70,14 @@ namespace gtsam {
   // In-place version that overwrites e
   void multiplyInPlace(const SubgraphPreconditioner& sp, const VectorValues& y, Errors& e) {

     Errors::iterator ei = e.begin();
     for ( Index i = 0 ; i < y.size() ; ++i, ++ei ) {
       *ei = y[i];
     }

     // Add A2 contribution
-    VectorValues x = y; // TODO avoid ?
-    gtsam::optimizeInPlace(*sp.Rc1(), x); // x=inv(R1)*y
-    gtsam::multiplyInPlace(*sp.Ab2(),x,ei); // use iterator version
+    VectorValues x = gtsam::backSubstitute(*sp.Rc1(), y); // x=inv(R1)*y
+    gtsam::multiplyInPlace(*sp.Ab2(), x, ei); // use iterator version
   }

   /* ************************************************************************* */
@@ -114,13 +97,12 @@ namespace gtsam {
   void transposeMultiplyAdd
     (const SubgraphPreconditioner& sp, double alpha, const Errors& e, VectorValues& y) {

     Errors::const_iterator it = e.begin();
     for ( Index i = 0 ; i < y.size() ; ++i, ++it ) {
       const Vector& ei = *it;
-      axpy(alpha,ei,y[i]);
+      axpy(alpha, ei, y[i]);
     }
-    sp.transposeMultiplyAdd2(alpha,it,e.end(),y);
+    sp.transposeMultiplyAdd2(alpha, it, e.end(), y);
   }

   /* ************************************************************************* */
@@ -42,8 +42,8 @@ namespace gtsam {
 private:
   sharedFG Ab1_, Ab2_;
   sharedBayesNet Rc1_;
-  sharedValues xbar_;
-  sharedErrors b2bar_; /** b2 - A2*xbar */
+  sharedValues xbar_; ///< A1 \ b1
+  sharedErrors b2bar_; ///< A2*xbar - b2

 public:
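The updated comments pin down the cached quantities: the graph is split into a tree part A_1 (factored as R_1) and a remainder A_2, and xbar_ solves the tree part alone. With \bar b_2 = b_2 - A_2 \bar x and the substitution x = \bar x + R_1^{-1} y, the least-squares problem transforms as

\[
\min_x \lVert A_1 x - b_1\rVert^{2} + \lVert A_2 x - b_2\rVert^{2}
\;\longrightarrow\;
\min_y \lVert y\rVert^{2} + \bigl\lVert A_2 R_1^{-1} y - \bar b_2\bigr\rVert^{2},
\]

with b2bar_ storing the negation A_2 \bar x - b_2, as the new comment states.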
@@ -66,6 +66,9 @@ namespace gtsam {
   /** Access Rc1 */
   const sharedBayesNet& Rc1() const { return Rc1_; }

+  /** Access b2bar */
+  const sharedErrors b2bar() const { return b2bar_; }
+
   /**
    * Add zero-mean i.i.d. Gaussian prior terms to each variable
    * @param sigma Standard deviation of Gaussian
@@ -61,9 +61,7 @@ SubgraphSolver::SubgraphSolver(const GaussianFactorGraph &gfg, const Parameters

 VectorValues::shared_ptr SubgraphSolver::optimize() {

   // preconditioned conjugate gradient
-  VectorValues zeros = pc_->zero();
-  VectorValues ybar = conjugateGradients<SubgraphPreconditioner, VectorValues, Errors> (*pc_, zeros, parameters_);
+  VectorValues ybar = conjugateGradients<SubgraphPreconditioner, VectorValues, Errors>(*pc_, pc_->zero(), parameters_);

   boost::shared_ptr<VectorValues> xbar = boost::make_shared<VectorValues>() ;
   *xbar = pc_->x(ybar);
@@ -19,6 +19,11 @@

 namespace gtsam {

+struct SubgraphSolverParameters : public ConjugateGradientParameters {
+  typedef ConjugateGradientParameters Base;
+  SubgraphSolverParameters() : Base() {}
+};
+
 /**
  * A linear system solver using subgraph preconditioning conjugate gradient
  */
@@ -27,18 +32,17 @@ class SubgraphSolver : public IterativeSolver {

 public:

-  typedef ConjugateGradientParameters Parameters;
+  typedef SubgraphSolverParameters Parameters;

 protected:

   Parameters parameters_;

   SubgraphPreconditioner::shared_ptr pc_; ///< preconditioner object

 public:

   SubgraphSolver(const GaussianFactorGraph &gfg, const Parameters &parameters);

   virtual ~SubgraphSolver() {}

   virtual VectorValues::shared_ptr optimize () ;

 protected:
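For standalone use outside LM, construction mirrors the interface above; a minimal sketch, assuming a GaussianFactorGraph gfg is already built:

    SubgraphSolverParameters params;                    // CG defaults from the base class
    SubgraphSolver solver(gfg, params);                 // builds the subgraph preconditioner
    VectorValues::shared_ptr xbar = solver.optimize();  // runs PCG, returns the solution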
@@ -76,15 +76,19 @@ void LevenbergMarquardtOptimizer::iterate() {
   }
   else if ( params_.isCG() ) {

-    ConjugateGradientParameters::shared_ptr params (!params_.iterativeParams ?
-        boost::make_shared<ConjugateGradientParameters>() :
-        boost::dynamic_pointer_cast<ConjugateGradientParameters>(params_.iterativeParams));
+    if ( !params_.iterativeParams ) throw runtime_error("LMSolver: cg parameter has to be assigned ...");

-    if ( !params ) throw runtime_error("LMSolver: spcg parameter dynamic casting failed");

-    SimpleSPCGSolver solver(dampedSystem, *params);
-    //SubgraphSolver solver(dampedSystem, *params);
-    delta = *solver.optimize();
+    if ( boost::dynamic_pointer_cast<SimpleSPCGSolverParameters>(params_.iterativeParams)) {
+      SimpleSPCGSolver solver (dampedSystem, *boost::dynamic_pointer_cast<SimpleSPCGSolverParameters>(params_.iterativeParams));
+      delta = *solver.optimize();
+    }
+    else if ( boost::dynamic_pointer_cast<SubgraphSolverParameters>(params_.iterativeParams) ) {
+      SubgraphSolver solver (dampedSystem, *boost::dynamic_pointer_cast<SubgraphSolverParameters>(params_.iterativeParams));
+      delta = *solver.optimize();
+    }
+    else {
+      throw runtime_error("LMSolver: special cg parameter type is not handled in LM solver ...");
+    }
   }
   else {
     throw runtime_error("Optimization parameter is invalid: LevenbergMarquardtParams::elimination");
@@ -118,13 +118,10 @@ namespace gtsam {
   // conjugate gradient method.
   // S: linear system, V: step vector, E: errors
   template<class S, class V, class E>
-  V conjugateGradients(
-      const S& Ab,
-      V x,
-      const ConjugateGradientParameters &parameters,
-      bool steepest = false) {
+  V conjugateGradients(const S& Ab, V x, const ConjugateGradientParameters &parameters, bool steepest = false) {

     CGState<S, V, E> state(Ab, x, parameters, steepest);

     if (parameters.verbosity() != ConjugateGradientParameters::SILENT)
       std::cout << "CG: epsilon = " << parameters.epsilon()
         << ", maxIterations = " << parameters.maxIterations()
@@ -44,6 +44,7 @@ namespace gtsam {
  * Needed to run Conjugate Gradients on matrices
  * */
 class System {
+
 private:
   const Matrix& A_;
   const Vector& b_;