Add the linear system solver as a template class parameter of the nonlinear optimizer.

release/4.3a0
Kai Ni 2010-01-18 05:51:19 +00:00
parent a4a6e002e5
commit 05b07d443e
14 changed files with 294 additions and 51 deletions

View File

@ -293,7 +293,6 @@ PredecessorMap<Key> FactorGraph<Factor>::findMinimumSpanningTree() const {
for (vi = p_map.begin(); vi!=p_map.end(); itVertex++, vi++) {
Key key = boost::get(boost::vertex_name, g, *itVertex);
Key parent = boost::get(boost::vertex_name, g, *vi);
// printf("%s parent: %s\n", key.c_str(), parent.c_str());
tree.insert(key, parent);
}

View File

@ -220,4 +220,31 @@ namespace gtsam {
size_t maxIterations = 0) const;
};
/**
* A linear system solver using factorization
*/
template <class NonlinearGraph, class Config>
class Factorization {
public:
Factorization() {}
Factorization(const NonlinearGraph& g, const Config& config) {}
/**
* solve the given linear system for the optimal displacement in the tangent space
*/
VectorConfig optimize(GaussianFactorGraph& fg, const Ordering& ordering) const {
return fg.optimize(ordering);
}
/**
* linearize the non-linear graph around the current config and solve the resulting linear system
*/
VectorConfig linearizeAndOptimize(const NonlinearGraph& g, const Config& config,
const Ordering& ordering) const {
GaussianFactorGraph linear = g.linearize(config);
return optimize(linear, ordering);
}
};
}
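
For orientation, here is a concept-style sketch (not part of the commit) of the interface contract this change establishes: any type supplying these two members can serve as the optimizer's LinearSolver parameter. Factorization above and SubgraphPCG below both match this shape.

// Hypothetical sketch of the solver "concept" assumed by NonlinearOptimizer.
template <class NonlinearGraph, class Config>
struct LinearSolverConcept {
	// solve an already-assembled linear system (used by the LM damping path)
	VectorConfig optimize(GaussianFactorGraph& fg, const Ordering& ordering) const;
	// linearize around config and solve (used by the Gauss-Newton path)
	VectorConfig linearizeAndOptimize(const NonlinearGraph& g, const Config& config,
			const Ordering& ordering) const;
};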

View File

@ -141,5 +141,8 @@ namespace gtsam {
}
};
template<class Key> Symbol key2symbol(Key key) {
return Symbol(key);
}
} // namespace gtsam
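
The new key2symbol free function wraps the Symbol(key) conversion in a named callable so it can be passed to std::transform, as done when building an Ordering from spanning-tree keys in the new subgraphPCG test in testIterative.cpp below.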

View File

@ -117,7 +117,7 @@ testNoiseModel_SOURCES = testNoiseModel.cpp
testNoiseModel_LDADD = libgtsam.la
# Iterative Methods
headers += iterative-inl.h
headers += iterative-inl.h SubgraphPreconditioner-inl.h
sources += iterative.cpp BayesNetPreconditioner.cpp SubgraphPreconditioner.cpp
check_PROGRAMS += testIterative testBayesNetPreconditioner testSubgraphPreconditioner
testIterative_SOURCES = $(example) testIterative.cpp

View File

@ -44,37 +44,38 @@ namespace gtsam {
}
/* ************************************************************************* */
// Constructors
// Constructor with the solver
/* ************************************************************************* */
template<class G, class C>
NonlinearOptimizer<G, C>::NonlinearOptimizer(shared_graph graph,
shared_ordering ordering, shared_config config, double lambda) :
template<class G, class C, class LS>
NonlinearOptimizer<G, C, LS>::NonlinearOptimizer(shared_graph graph,
shared_ordering ordering, shared_config config, shared_solver solver, double lambda) :
graph_(graph), ordering_(ordering), config_(config), error_(graph->error(
*config)), lambda_(lambda) {
*config)), lambda_(lambda), solver_(solver) {
}
/* ************************************************************************* */
// Constructors without the solver
/* ************************************************************************* */
// template<class G, class C, class LS>
// NonlinearOptimizer<G, C, LS>::NonlinearOptimizer(shared_graph graph,
// shared_ordering ordering, shared_config config, double lambda) :
// graph_(graph), ordering_(ordering), config_(config), error_(graph->error(
// *config)), lambda_(lambda), solver_(new LS(*graph, *config)){
// }
/* ************************************************************************* */
// linearize and optimize
/* ************************************************************************* */
template<class G, class C>
VectorConfig NonlinearOptimizer<G, C>::linearizeAndOptimizeForDelta() const {
// linearize the non-linear graph around the current config
// which gives a linear optimization problem in the tangent space
GaussianFactorGraph linear = graph_->linearize(*config_);
// solve for the optimal displacement in the tangent space
VectorConfig delta = linear.optimize(*ordering_);
// return
return delta;
template<class G, class C, class LS>
VectorConfig NonlinearOptimizer<G, C, LS>::linearizeAndOptimizeForDelta() const {
return solver_->linearizeAndOptimize(*graph_, *config_, *ordering_);
}
/* ************************************************************************* */
// One iteration of Gauss Newton
/* ************************************************************************* */
template<class G, class C>
NonlinearOptimizer<G, C> NonlinearOptimizer<G, C>::iterate(
template<class G, class C, class LS>
NonlinearOptimizer<G, C, LS> NonlinearOptimizer<G, C, LS>::iterate(
verbosityLevel verbosity) const {
// linearize and optimize
@ -91,7 +92,7 @@ namespace gtsam {
if (verbosity >= CONFIG)
newConfig->print("newConfig");
NonlinearOptimizer newOptimizer = NonlinearOptimizer(graph_, ordering_, newConfig);
NonlinearOptimizer newOptimizer = NonlinearOptimizer(graph_, ordering_, newConfig, solver_);
if (verbosity >= ERROR)
cout << "error: " << newOptimizer.error_ << endl;
@ -100,8 +101,8 @@ namespace gtsam {
}
/* ************************************************************************* */
template<class G, class C>
NonlinearOptimizer<G, C> NonlinearOptimizer<G, C>::gaussNewton(
template<class G, class C, class LS>
NonlinearOptimizer<G, C, LS> NonlinearOptimizer<G, C, LS>::gaussNewton(
double relativeThreshold, double absoluteThreshold,
verbosityLevel verbosity, int maxIterations) const {
// linearize, solve, update
@ -124,8 +125,8 @@ namespace gtsam {
// optimizer if error decreased or recurse with a larger lambda if not.
// TODO: in theory we can't infinitely recurse, but maybe we should put a max.
/* ************************************************************************* */
template<class G, class C>
NonlinearOptimizer<G, C> NonlinearOptimizer<G, C>::try_lambda(
template<class G, class C, class LS>
NonlinearOptimizer<G, C, LS> NonlinearOptimizer<G, C, LS>::try_lambda(
const GaussianFactorGraph& linear, verbosityLevel verbosity, double factor) const {
if (verbosity >= TRYLAMBDA)
@ -137,7 +138,7 @@ namespace gtsam {
damped.print("damped");
// solve
VectorConfig delta = damped.optimize(*ordering_);
VectorConfig delta = solver_->optimize(damped, *ordering_);
if (verbosity >= TRYDELTA)
delta.print("delta");
@ -147,14 +148,14 @@ namespace gtsam {
newConfig->print("config");
// create new optimization state with more adventurous lambda
NonlinearOptimizer next(graph_, ordering_, newConfig, lambda_ / factor);
NonlinearOptimizer next(graph_, ordering_, newConfig, solver_, lambda_ / factor);
// if error decreased, return the new state
if (next.error_ <= error_)
return next;
else {
// TODO: can we avoid copying the config ?
NonlinearOptimizer cautious(graph_, ordering_, config_, lambda_ * factor);
NonlinearOptimizer cautious(graph_, ordering_, config_, solver_, lambda_ * factor);
return cautious.try_lambda(linear, verbosity, factor);
}
}
@ -162,8 +163,8 @@ namespace gtsam {
/* ************************************************************************* */
// One iteration of Levenberg Marquardt
/* ************************************************************************* */
template<class G, class C>
NonlinearOptimizer<G, C> NonlinearOptimizer<G, C>::iterateLM(
template<class G, class C, class LS>
NonlinearOptimizer<G, C, LS> NonlinearOptimizer<G, C, LS>::iterateLM(
verbosityLevel verbosity, double lambdaFactor) const {
// maybe show output
@ -184,8 +185,8 @@ namespace gtsam {
}
/* ************************************************************************* */
template<class G, class C>
NonlinearOptimizer<G, C> NonlinearOptimizer<G, C>::levenbergMarquardt(
template<class G, class C, class LS>
NonlinearOptimizer<G, C, LS> NonlinearOptimizer<G, C, LS>::levenbergMarquardt(
double relativeThreshold, double absoluteThreshold,
verbosityLevel verbosity, int maxIterations, double lambdaFactor) const {

View File

@ -20,12 +20,12 @@ namespace gtsam {
* and then one of the optimization routines is called. These recursively iterate
* until convergence. All methods are functional and return a new state.
*
* The class is parameterized by the Graph type and Config class type, the latter
* in order to be able to optimize over non-vector configurations as well.
* The class is parameterized by the Graph type, the Config class type, and the linear solver type.
* The Config type parameter makes it possible to optimize over non-vector configurations as well.
* To use in code, include <gtsam/NonlinearOptimizer-inl.h> in your cpp file
* (the trick in http://www.ddj.com/cpp/184403420 did not work).
*/
template<class FactorGraph, class Config>
template<class FactorGraph, class Config, class LinearSolver = Factorization<FactorGraph, Config> >
class NonlinearOptimizer {
public:
@ -33,6 +33,7 @@ namespace gtsam {
typedef boost::shared_ptr<const Config> shared_config;
typedef boost::shared_ptr<const FactorGraph> shared_graph;
typedef boost::shared_ptr<const Ordering> shared_ordering;
typedef boost::shared_ptr<const LinearSolver> shared_solver;
enum verbosityLevel {
SILENT,
@ -63,6 +64,9 @@ namespace gtsam {
// TODO: red flag, should we have an LM class ?
const double lambda_;
// the linear system solver
const shared_solver solver_;
// Recursively try to do tempered Gauss-Newton steps until we succeed
NonlinearOptimizer try_lambda(const GaussianFactorGraph& linear,
verbosityLevel verbosity, double factor) const;
@ -73,14 +77,15 @@ namespace gtsam {
* Constructor
*/
NonlinearOptimizer(shared_graph graph, shared_ordering ordering,
shared_config config, double lambda = 1e-5);
shared_config config, shared_solver solver = shared_solver(new LinearSolver),
double lambda = 1e-5);
/**
* Copy constructor
*/
NonlinearOptimizer(const NonlinearOptimizer<FactorGraph, Config, LinearSolver> &optimizer) :
graph_(optimizer.graph_), ordering_(optimizer.ordering_), config_(optimizer.config_),
error_(optimizer.error_), lambda_(optimizer.lambda_) {}
error_(optimizer.error_), lambda_(optimizer.lambda_), solver_(optimizer.solver_) {}
/**
* Return current error

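A minimal usage sketch of the new parameterization (the graph, ordering, and config shared pointers are assumed set up as in the tests later in this commit): the default template argument keeps existing two-parameter instantiations compiling, while a custom solver is injected through the new shared_solver constructor argument.

// Default solver: LinearSolver falls back to Factorization<Pose2Graph, Pose2Config>.
typedef NonlinearOptimizer<Pose2Graph, Pose2Config> DefaultOptimizer;
// Explicit solver: name it as the third template argument and pass an instance.
typedef NonlinearOptimizer<Pose2Graph, Pose2Config,
		SubgraphPCG<Pose2Graph, Pose2Config> > PCGOptimizer;
PCGOptimizer::shared_solver solver(
		new SubgraphPCG<Pose2Graph, Pose2Config>(*graph, *config));
PCGOptimizer optimizer(graph, ordering, config, solver);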
View File

@ -0,0 +1,61 @@
/*
* SubgraphPreconditioner-inl.h
*
* Created on: Jan 17, 2010
* Author: nikai
* Description: subgraph preconditioning conjugate gradient solver
*/
#pragma once
#include <boost/tuple/tuple.hpp>
#include "SubgraphPreconditioner.h"
#include "Ordering-inl.h"
#include "iterative-inl.h"
#include "FactorGraph-inl.h"
using namespace std;
namespace gtsam {
/* ************************************************************************* */
template<class Graph, class Config>
SubgraphPCG<Graph, Config>::SubgraphPCG(const Graph& G, const Config& config) :
maxIterations_(100), verbose_(false), epsilon_(1e-4), epsilon_abs_(1e-5) {
// generate spanning tree and create ordering
PredecessorMap<Key> tree = G.template findMinimumSpanningTree<Key, Constraint>();
list<Key> keys = predecessorMap2Keys(tree);
// split the graph
Key root = keys.back();
if (verbose_) cout << "generating spanning tree and splitting the graph ...";
G.template split<Key, Constraint>(tree, T_, C_);
if (verbose_) cout << T_.size() << " and " << C_.size() << " factors" << endl;
// theta_bar = composePoses<Graph, Constraint, Pose, Config> (T_, tree, config[root]);
}
/* ************************************************************************* */
template<class Graph, class Config>
VectorConfig SubgraphPCG<Graph, Config>::linearizeAndOptimize(const Graph& g,
const Config& theta_bar, const Ordering& ordering) const {
VectorConfig zeros;
BOOST_FOREACH(const string& j, ordering) zeros.insert(j,zero(3));
// build the subgraph PCG system
GaussianFactorGraph Ab1 = T_.linearize(theta_bar);
GaussianFactorGraph Ab2 = C_.linearize(theta_bar);
const GaussianBayesNet Rc1 = Ab1.eliminate(ordering);
VectorConfig xbar = gtsam::optimize(Rc1);
SubgraphPreconditioner system(Rc1, Ab2, xbar);
// Solve the subgraph PCG
VectorConfig ybar = conjugateGradients<SubgraphPreconditioner, VectorConfig,
Errors> (system, zeros, verbose_, epsilon_, epsilon_abs_, maxIterations_);
VectorConfig xbar2 = system.x(ybar);
return xbar2;
}
}
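
For reference, a sketch of the algebra this routine implements, consistent with the gradient comment added to SubgraphPreconditioner.h below. Splitting the linearized problem into the tree system $A_1 x = b_1$ and the remaining constraints $A_2 x = b_2$, elimination of the tree yields $R_1$ and $\bar{x}$. The change of variables $x = \bar{x} + R_1^{-1} y$ gives the preconditioned least-squares problem

$$\min_y \; \|y\|^2 + \|A_2 R_1^{-1} y - \bar{b}_2\|^2, \qquad \bar{b}_2 = b_2 - A_2 \bar{x},$$

whose gradient $y + R_1^{-T} A_2^{T} (A_2 R_1^{-1} y - \bar{b}_2)$ is exactly the comment added below. conjugateGradients solves this for $\bar{y}$, and system.x(ybar) maps back via $x = \bar{x} + R_1^{-1} \bar{y}$.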

View File

@ -96,8 +96,8 @@ namespace gtsam {
return y1 + y2;
}
/* ************************************************************************* */
/* ************************************************************************* */
void SubgraphPreconditioner::print(const std::string& s) const {
cout << s << endl;
Ab2_.print();

View File

@ -46,7 +46,7 @@ namespace gtsam {
/* error, given y */
double error(const VectorConfig& y) const;
/** gradient */
/** gradient = y + inv(R1')*A2'*(A2*inv(R1)*y-b2bar) */
VectorConfig gradient(const VectorConfig& y) const;
/** Apply operator A */
@ -59,6 +59,43 @@ namespace gtsam {
void print(const std::string& s = "SubgraphPreconditioner") const;
};
class Ordering;
/**
* A linear system solver using subgraph preconditioning conjugate gradient
*/
template <class NonlinearGraph, class Config>
class SubgraphPCG {
private:
typedef typename Config::Key Key;
typedef typename NonlinearGraph::Constraint Constraint;
typedef typename NonlinearGraph::Pose Pose;
const size_t maxIterations_;
const bool verbose_;
const double epsilon_, epsilon_abs_;
NonlinearGraph T_, C_;
public:
SubgraphPCG() {}
SubgraphPCG(const NonlinearGraph& G, const Config& config);
/**
* not supported: SubgraphPCG cannot solve an arbitrary pre-assembled linear
* system, so only the linearizeAndOptimize (Gauss-Newton) path is available
*/
VectorConfig optimize(GaussianFactorGraph& fg, const Ordering& ordering) const {
throw std::runtime_error("SubgraphPCG::optimize is not supported!");
}
/**
* linearize the non-linear graph around the current config and solve it with subgraph preconditioned conjugate gradient
*/
VectorConfig linearizeAndOptimize(const NonlinearGraph& g, const Config& config,
const Ordering& ordering) const;
};
} // namespace gtsam
#endif /* SUBGRAPHPRECONDITIONER_H_ */
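
Note the compile-time contract SubgraphPCG places on its graph parameter: Config::Key, NonlinearGraph::Constraint, and NonlinearGraph::Pose must all be defined. This is what the pose2SLAM.h hunk below satisfies, e.g. (sketch):

struct Graph: public NonlinearFactorGraph<Config> {
	typedef BetweenFactor<Config, Key, Pose2> Constraint; // used by split/MST
	typedef Pose2 Pose;                                   // newly required by SubgraphPCG
	// ...
};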

View File

@ -40,6 +40,8 @@ namespace gtsam {
// Graph
struct Graph: public NonlinearFactorGraph<Config> {
typedef BetweenFactor<Config, Key, Pose2> Constraint;
typedef Pose2 Pose;
void addPrior(const Key& i, const Pose2& p, const sharedGaussian& model);
void addConstraint(const Key& i, const Key& j, const Pose2& z, const sharedGaussian& model);
void addHardConstraint(const Key& i, const Pose2& p);

View File

@ -17,8 +17,12 @@ using namespace boost::assign;
#include "iterative.h"
#include "smallExample.h"
#include "pose2SLAM.h"
#include "SubgraphPreconditioner.h"
#include "Ordering-inl.h"
#include "FactorGraph-inl.h"
#include "NonlinearFactorGraph-inl.h"
#include "iterative-inl.h"
using namespace std;
using namespace gtsam;
@ -111,10 +115,8 @@ TEST( Iterative, conjugateGradientDescent_soft_constraint )
config.insert(2, Pose2(1.5,0.,0.));
Pose2Graph graph;
Matrix cov = eye(3);
Matrix cov2 = eye(3) * 1e-10;
graph.addPrior(1, Pose2(0.,0.,0.), cov2);
graph.addConstraint(1,2, Pose2(1.,0.,0.), cov);
graph.addPrior(1, Pose2(0.,0.,0.), eye(3) * 1e-10);
graph.addConstraint(1,2, Pose2(1.,0.,0.), eye(3));
VectorConfig zeros;
zeros.insert("x1",zero(3));
@ -128,6 +130,53 @@ TEST( Iterative, conjugateGradientDescent_soft_constraint )
expected.insert("x2", Vector_(3,-0.5,0.,0.));
CHECK(assert_equal(expected, actual));
}
/* ************************************************************************* */
TEST( Iterative, subgraphPCG )
{
typedef Pose2Config::Key Key;
Pose2Config theta_bar;
theta_bar.insert(1, Pose2(0.,0.,0.));
theta_bar.insert(2, Pose2(1.5,0.,0.));
Pose2Graph graph;
graph.addPrior(1, Pose2(0.,0.,0.), eye(3) * 1e-10);
graph.addConstraint(1,2, Pose2(1.,0.,0.), eye(3));
VectorConfig zeros;
zeros.insert("x1",zero(3));
zeros.insert("x2",zero(3));
// generate spanning tree and create ordering
PredecessorMap<Key> tree = graph.findMinimumSpanningTree<Key, Pose2Factor>();
list<Key> keys = predecessorMap2Keys(tree);
list<Symbol> symbols;
symbols.resize(keys.size());
std::transform(keys.begin(), keys.end(), symbols.begin(), key2symbol<Key>);
Ordering ordering(symbols);
Key root = keys.back();
Pose2Graph T, C;
graph.split<Key, Pose2Factor>(tree, T, C);
// build the subgraph PCG system
GaussianFactorGraph Ab1 = T.linearize(theta_bar);
GaussianFactorGraph Ab2 = C.linearize(theta_bar);
const GaussianBayesNet Rc1 = Ab1.eliminate(ordering);
VectorConfig xbar = optimize(Rc1);
SubgraphPreconditioner system(Rc1, Ab2, xbar);
// Solve the subgraph PCG
VectorConfig ybar = conjugateGradients<SubgraphPreconditioner, VectorConfig,
Errors> (system, zeros, false, 1e-5, 1e-5, 100);
VectorConfig actual = system.x(ybar);
VectorConfig expected;
expected.insert("x1", zero(3));
expected.insert("x2", Vector_(3, -0.5, 0., 0.));
CHECK(assert_equal(expected, actual));
}
/* ************************************************************************* */
int main() {
TestResult tr;

View File

@ -20,10 +20,13 @@ using namespace boost;
#include "Matrix.h"
#include "Ordering.h"
#include "smallExample.h"
#include "pose2SLAM.h"
#include "GaussianFactorGraph.h"
// template definitions
#include "NonlinearFactorGraph-inl.h"
#include "NonlinearOptimizer-inl.h"
#include "SubgraphPreconditioner-inl.h"
using namespace gtsam;
@ -89,7 +92,8 @@ TEST( NonlinearOptimizer, iterateLM )
ord->push_back("x");
// create initial optimization state, with lambda=0
Optimizer optimizer(fg, ord, config, 0);
Optimizer::shared_solver solver(new Factorization<ExampleNonlinearFactorGraph, VectorConfig>);
Optimizer optimizer(fg, ord, config, solver, 0.);
// normal iterate
Optimizer iterated1 = optimizer.iterate();
@ -146,6 +150,61 @@ TEST( NonlinearOptimizer, optimize )
CHECK(assert_equal(*(actual2.config()),cstar));
}
/* ************************************************************************* */
TEST( NonlinearOptimizer, Factorization )
{
typedef NonlinearOptimizer<Pose2Graph, Pose2Config, Factorization<Pose2Graph, Pose2Config> > Optimizer;
boost::shared_ptr<Pose2Config> config(new Pose2Config);
config->insert(1, Pose2(0.,0.,0.));
config->insert(2, Pose2(1.5,0.,0.));
boost::shared_ptr<Pose2Graph> graph(new Pose2Graph);
graph->addPrior(1, Pose2(0.,0.,0.), eye(3) * 1e-10);
graph->addConstraint(1,2, Pose2(1.,0.,0.), eye(3));
boost::shared_ptr<Ordering> ordering(new Ordering);
ordering->push_back(Pose2Config::Key(1));
ordering->push_back(Pose2Config::Key(2));
Optimizer optimizer(graph, ordering, config);
Optimizer optimized = optimizer.iterateLM();
Pose2Config expected;
expected.insert(1, Pose2(0.,0.,0.));
expected.insert(2, Pose2(1.,0.,0.));
CHECK(assert_equal(expected, *optimized.config(), 1e-5));
}
/* ************************************************************************* */
TEST( NonlinearOptimizer, SubgraphPCG )
{
typedef NonlinearOptimizer<Pose2Graph, Pose2Config, SubgraphPCG<Pose2Graph, Pose2Config> > Optimizer;
boost::shared_ptr<Pose2Config> config(new Pose2Config);
config->insert(1, Pose2(0.,0.,0.));
config->insert(2, Pose2(1.5,0.,0.));
boost::shared_ptr<Pose2Graph> graph(new Pose2Graph);
graph->addPrior(1, Pose2(0.,0.,0.), eye(3) * 1e-10);
graph->addConstraint(1,2, Pose2(1.,0.,0.), eye(3));
boost::shared_ptr<Ordering> ordering(new Ordering);
ordering->push_back(Pose2Config::Key(1));
ordering->push_back(Pose2Config::Key(2));
double relativeThreshold = 1e-5;
double absoluteThreshold = 1e-5;
Optimizer::shared_solver solver(new SubgraphPCG<Pose2Graph, Pose2Config>(*graph, *config));
Optimizer optimizer(graph, ordering, config, solver);
Optimizer optimized = optimizer.gaussNewton(relativeThreshold, absoluteThreshold, Optimizer::SILENT);
Pose2Config expected;
expected.insert(1, Pose2(0.,0.,0.));
expected.insert(2, Pose2(1.,0.,0.));
CHECK(assert_equal(expected, *optimized.config(), 1e-5));
}
/* ************************************************************************* */
int main() {
TestResult tr;

View File

@ -299,7 +299,7 @@ TEST (SQP, two_pose_truth ) {
// optimize the graph
shared_ptr<Ordering> ordering(new Ordering());
*ordering += "x1", "x2", "l1";
Optimizer optimizer(graph, ordering, initialEstimate, 1e-5);
Optimizer optimizer(graph, ordering, initialEstimate);
// display solution
double relativeThreshold = 1e-5;
@ -523,7 +523,7 @@ TEST (SQP, stereo_truth ) {
*ord += "x1", "x2", "l1";
// create optimizer
VOptimizer optimizer(graph, ord, truthConfig, 1e-5);
VOptimizer optimizer(graph, ord, truthConfig);
// optimize
VOptimizer afterOneIteration = optimizer.iterate();
@ -599,7 +599,7 @@ TEST (SQP, stereo_truth_noisy ) {
*ord += "x1", "x2", "l1";
// create optimizer
VOptimizer optimizer0(graph, ord, noisyConfig, 1e-5);
VOptimizer optimizer0(graph, ord, noisyConfig);
if (verbose)
cout << "Initial Error: " << optimizer0.error() << endl;

View File

@ -100,7 +100,7 @@ TEST( Graph, optimizeLM)
// Create an optimizer and check its error
// We expect the initial to be zero because config is the ground truth
Optimizer optimizer(graph, ordering, initialEstimate, 1e-5);
Optimizer optimizer(graph, ordering, initialEstimate);
DOUBLES_EQUAL(0.0, optimizer.error(), 1e-9);
// Iterate once, and the config should not have changed because we started
@ -144,7 +144,7 @@ TEST( Graph, optimizeLM2)
// Create an optimizer and check its error
// We expect the initial to be zero because config is the ground truth
Optimizer optimizer(graph, ordering, initialEstimate, 1e-5);
Optimizer optimizer(graph, ordering, initialEstimate);
DOUBLES_EQUAL(0.0, optimizer.error(), 1e-9);
// Iterate once, and the config should not have changed because we started
@ -179,7 +179,7 @@ TEST( Graph, CHECK_ORDERING)
// Create an optimizer and check its error
// We expect the initial to be zero because config is the ground truth
Optimizer optimizer(graph, ordering, initialEstimate, 1e-5);
Optimizer optimizer(graph, ordering, initialEstimate);
DOUBLES_EQUAL(0.0, optimizer.error(), 1e-9);
// Iterate once, and the config should not have changed because we started